// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

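/*
 * Return the host physical address of a TDP MMU root page table for @vcpu,
 * reusing an existing root with a matching role when one exists.
 */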
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

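/*
 * Take a reference on @root before walking its paging structure.  Fails if
 * the root has been marked invalid or its last reference has already been
 * put, in which case the caller must not use the root.
 */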
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	if (root->role.invalid)
		return false;

	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

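/*
 * Drop a reference on @root; the root is torn down once its last reference
 * is put.  @shared is true if mmu_lock is held for read rather than write.
 * Sketch of the typical pairing (illustrative, not a verbatim caller):
 *
 *	if (!kvm_tdp_mmu_get_root(kvm, root))
 *		continue;
 *	... walk the root's paging structure ...
 *	kvm_tdp_mmu_put_root(kvm, root, shared);
 */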
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);

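/*
 * Zap the SPTEs covering the GFN range [@start, @end) in address space
 * @as_id.  @can_yield allows the walk to drop mmu_lock and reschedule,
 * @flush carries in whether a TLB flush is already pending, and the return
 * value tells the caller whether a flush is still required.
 */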
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush,
				 bool shared);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
					     gfn_t start, gfn_t end, bool flush,
					     bool shared)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush,
					   shared);
}
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);

	/*
	 * Don't allow yielding, as the caller may have a flush pending.  Note,
	 * if mmu_lock is held for write, zapping will never yield in this case,
	 * but explicitly disallow it for safety.  The TDP MMU does not yield
	 * until it has made forward progress (steps sideways), and when zapping
	 * a single shadow page that it's guaranteed to see (thus the mmu_lock
	 * requirement), its "step sideways" will always step beyond the bounds
	 * of the shadow page's gfn range and stop iterating before yielding.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);
	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
					   sp->gfn, end, false, false, false);
}

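/*
 * Teardown helpers: zap every SPTE in all roots, mark all valid roots
 * invalid so existing walkers drop them, and zap roots that were
 * previously invalidated.
 */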
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

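/*
 * Handle a TDP MMU page fault: install the SPTE(s) needed to map @pfn at
 * @gpa, mapping at @max_level or below, and return a RET_PF_* value.
 */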
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault);

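/* TDP MMU backing for the gfn-range-based MMU notifier callbacks. */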
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

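/*
 * Memslot-wide operations, used primarily for dirty logging: write-protect
 * SPTEs, clear dirty bits (optionally only for GFNs selected by @mask), and
 * zap SPTEs that map pages which could instead be mapped at a larger
 * granularity.
 */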
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       bool flush);

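/*
 * Remove write access from the SPTE mapping @gfn in @slot; returns true if
 * an SPTE was write-protected.
 */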
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn);

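/*
 * Record the SPTE at each level of the walk for @addr into @sptes and
 * report the root level; returns the lowest level reached.
 */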
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);

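/* The TDP MMU is only available on 64-bit hosts. */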
#ifdef CONFIG_X86_64
void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
#else
static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
#endif

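/*
 * Return true if @hpa is the root of a TDP MMU paging structure, i.e. was
 * allocated by the TDP MMU rather than the shadow MMU.
 */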
static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
	struct kvm_mmu_page *sp;

	if (!is_tdp_mmu_enabled(kvm))
		return false;
	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	sp = to_shadow_page(hpa);
	if (WARN_ON(!sp))
		return false;

	return is_tdp_mmu_page(sp) && sp->root_count;
}

#endif /* __KVM_X86_MMU_TDP_MMU_H */