// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

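/*
 * Get the TDP MMU root used by @vcpu, allocating one if necessary, and
 * return its host physical address.
 */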
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

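/*
 * Grab a reference to @root for a page table walk.  Fails if the root has
 * been marked invalid or its reference count has already dropped to zero,
 * i.e. the root is being torn down.
 */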
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	if (root->role.invalid)
		return false;

	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

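/*
 * Drop a reference to @root; the root is freed once its last reference is
 * put.  @shared indicates whether mmu_lock is held for read (shared) rather
 * than write.
 */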
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);

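/*
 * Zap SPTEs in the GFN range [@start, @end) of the given address space.
 * @can_yield controls whether the walk may drop mmu_lock and reschedule,
 * @flush carries in whether a TLB flush is already pending, and the return
 * value tells the caller whether a flush is still needed.
 */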
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
					     gfn_t start, gfn_t end, bool flush)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
}
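/* Zap only the GFN range spanned by the shadow page @sp. */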
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level + 1);

	/*
	 * Don't allow yielding, as the caller may have a flush pending.  Note,
	 * mmu_lock is held for write here, so zapping will never yield anyway,
	 * but explicitly disallow it for safety.  The TDP MMU does not yield
	 * until it has made forward progress (steps sideways), and when zapping
	 * a single shadow page that it's guaranteed to see (thus the mmu_lock
	 * requirement), its "step sideways" will always step beyond the bounds
	 * of the shadow page's gfn range and stop iterating before yielding.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);
	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
					   sp->gfn, end, false, false);
}

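/*
 * kvm_tdp_mmu_zap_all() zaps every SPTE under every root.
 * kvm_tdp_mmu_invalidate_all_roots() marks all valid roots invalid, forcing
 * vCPUs to load a fresh root, and kvm_tdp_mmu_zap_invalidated_roots() then
 * zaps and frees the roots that were invalidated.
 */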
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

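/*
 * Handle a TDP page fault by installing the mappings described by @fault;
 * returns a RET_PF_* value.
 */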
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

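/*
 * Hooks called from the common MMU notifier handlers to unmap, age, test the
 * age of, or update SPTEs in the affected GFN range.
 */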
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

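/*
 * Dirty logging helpers: write-protect a memslot down to @min_level, clear
 * dirty bits across a slot or a masked set of GFNs, and zap SPTEs that could
 * be collapsed back into huge pages once dirty logging is disabled.
 */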
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       bool flush);

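/*
 * Remove write access from SPTEs at or above @min_level that map @gfn;
 * returns true if any SPTE was write-protected.
 */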
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

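/*
 * Lockless walks of the TDP MMU's paging structures are protected by RCU
 * rather than mmu_lock; bracketing a walk with these helpers ensures removed
 * page tables aren't freed while the walk is in progress.
 */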
static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

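/*
 * kvm_tdp_mmu_get_walk() records the SPTEs encountered while walking to @addr
 * in @sptes and returns the level of the leaf SPTE.
 * kvm_tdp_mmu_fast_pf_get_last_sptep() returns a pointer to (and the value of)
 * the last SPTE in the walk, for the fast page fault path.  Both must be
 * called inside a lockless walk, roughly (variable names are illustrative):
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 */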
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte);

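/* The TDP MMU is only supported on 64-bit builds; 32-bit builds get stubs. */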
#ifdef CONFIG_X86_64
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }

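/* Return true if @mmu's current root was allocated by the TDP MMU. */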
static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
	struct kvm_mmu_page *sp;
	hpa_t hpa = mmu->root_hpa;

	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	/*
	 * A NULL shadow page is legal when shadowing a non-paging guest with
	 * PAE paging, as the MMU will be direct with root_hpa pointing at the
	 * pae_root page, not a shadow page.
	 */
	sp = to_shadow_page(hpa);
	return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
#else
static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */