/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \
			       X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE)

#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)

static __always_inline u64 rsvd_bits(int s, int e)
{
	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

	if (__builtin_constant_p(e))
		BUILD_BUG_ON(e > 63);
	else
		e &= 63;

	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}
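
/*
 * Worked example (illustrative values, not part of the original header):
 * rsvd_bits(3, 7) evaluates ((2ULL << 4) - 1) << 3 == 0xf8ULL, i.e. a mask
 * with bits 3..7 set.
 */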

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);

void kvm_init_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     int huge_page_level, bool accessed_dirty,
			     gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
			  u64 fault_address, char *insn, int insn_len);

int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);

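/*
 * Ensure a valid root table is loaded before entering the guest; this is a
 * no-op when vcpu->arch.mmu->root_hpa is already valid.
 */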
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}
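
/*
 * For example (hypothetical values): with CR4.PCIDE == 1 and CR3 == 0x12345005,
 * kvm_get_pcid() returns PCID 0x005; with CR4.PCIDE == 0 it always returns 0.
 */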

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root_hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
					  vcpu->arch.mmu->shadow_root_level);
}

struct kvm_page_fault {
	/* arguments to kvm_mmu_do_page_fault. */
	const gpa_t addr;
	const u32 error_code;
	const bool prefetch;

	/* Derived from error_code. */
	const bool exec;
	const bool write;
	const bool present;
	const bool rsvd;
	const bool user;

	/* Derived from mmu and global state. */
	const bool is_tdp;
	const bool nx_huge_page_workaround_enabled;

	/*
	 * Whether a >4KB mapping can be created or is forbidden due to NX
	 * hugepages.
	 */
	bool huge_page_disallowed;

	/*
	 * Maximum page size that can be created for this fault; input to
	 * FNAME(fetch), __direct_map and kvm_tdp_mmu_map.
	 */
	u8 max_level;

	/*
	 * Page size that can be created based on the max_level and the
	 * page size used by the host mapping.
	 */
	u8 req_level;

	/*
	 * Page size that will be created based on the req_level and
	 * huge_page_disallowed.
	 */
	u8 goal_level;

	/* Shifted addr, or result of guest page table walk if addr is a gva. */
	gfn_t gfn;

	/* The memslot containing gfn. May be NULL. */
	struct kvm_memory_slot *slot;

	/* Outputs of kvm_faultin_pfn. */
	kvm_pfn_t pfn;
	hva_t hva;
	bool map_writable;
};
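
/*
 * Illustrative example of the level fields above: if the host maps the
 * faulting gfn with a 2M page while max_level allows 1G, req_level is clamped
 * to 2M; if huge_page_disallowed is set (NX hugepage workaround), goal_level
 * falls back to PG_LEVEL_4K, otherwise it follows req_level.
 */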

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(void)
{
	return READ_ONCE(nx_huge_pages);
}

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefetch)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
	};
#ifdef CONFIG_RETPOLINE
	if (fault.is_tdp)
		return kvm_tdp_page_fault(vcpu, &fault);
#endif
	return vcpu->arch.mmu->page_fault(vcpu, &fault);
}

/*
 * Currently we have two sorts of write-protection: a) the first one
 * write-protects guest pages to sync guest modifications, b) the other one is
 * used to sync the dirty bitmap when we do KVM_GET_DIRTY_LOG. The differences
 * between these two sorts are:
 * 1) the first case clears the MMU-writable bit.
 * 2) the first case requires flushing the tlb immediately to avoid corrupting
 *    the shadow page table between all vcpus, so it should be done under the
 *    protection of mmu-lock. The other case does not need to flush the tlb
 *    until returning the dirty bitmap to userspace, since it only
 *    write-protects the pages logged in the bitmap; that means no page in the
 *    dirty bitmap is missed, so it can flush the tlb out of mmu-lock.
 *
 * So there is a problem: the first case can meet a corrupted tlb caused by
 * the other case, which write-protects pages without flushing the tlb
 * immediately. In order to make the first case aware of this problem, we let
 * it flush the tlb whenever we try to write-protect a spte whose MMU-writable
 * bit is set; this works since the other case never touches the MMU-writable
 * bit.
 *
 * Anyway, whenever a spte is updated (only permission and status bits are
 * changed) we need to check whether a spte with MMU-writable becomes
 * readonly; if that happens, we need to flush the tlb. Fortunately,
 * mmu_spte_update() has already handled it perfectly.
 *
 * The rules to use MMU-writable and PT_WRITABLE_MASK:
 * - if we want to see if it has a writable tlb entry or if the spte can be
 *   writable on the mmu mapping, check MMU-writable; this is the most common
 *   case. Otherwise,
 * - if we fix a page fault on the spte or do write-protection by dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline bool is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = static_call(kvm_x86_get_cpl)(vcpu);
	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);

	/*
	 * If CPL < 3, SMAP prevention is disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC. We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
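	/*
	 * Illustrative: with CPL < 3 and EFLAGS.AC == 1, smap ends up equal to
	 * X86_EFLAGS_AC (bit 18); shifting it right by
	 * X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1 == 16 moves it to bit 2, the
	 * position PFERR_RSVD_MASK occupies in (pfec >> 1), so a distinct
	 * mmu->permissions[] entry is selected when SMAP is overridden.
	 */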
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits, there are 16 domains and 2
		 * attribute bits per domain in pkru. pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the first bit for the domain.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

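	/*
	 * -(u32)fault is all-ones when a permission fault was detected and
	 * zero otherwise, so this returns either the accumulated error code
	 * or 0.
	 */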
	return -(u32)fault & errcode;
}

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
{
	/*
	 * Read shadow_root_allocated before related pointers. Hence, threads
	 * reading shadow_root_allocated in any lock context are guaranteed to
	 * see the pointers. Pairs with smp_store_release in
	 * mmu_first_shadow_root_alloc.
	 */
	return smp_load_acquire(&kvm->arch.shadow_root_allocated);
}

#ifdef CONFIG_X86_64
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
#else
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
#endif

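/*
 * rmaps are needed whenever the shadow MMU may be used; with the TDP MMU they
 * are allocated only once the first shadow root is allocated (e.g. for nested
 * virtualization), see kvm_shadow_root_allocated() above.
 */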
static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
	return !is_tdp_mmu_enabled(kvm) || kvm_shadow_root_allocated(kvm);
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline unsigned long
__kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages,
		      int level)
{
	return gfn_to_index(slot->base_gfn + npages - 1,
			    slot->base_gfn, level) + 1;
}

static inline unsigned long
kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
{
	return __kvm_mmu_slot_lpages(slot, slot->npages, level);
}
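
/*
 * Worked example (hypothetical slot): with base_gfn == 0 and npages == 1000,
 * kvm_mmu_slot_lpages(slot, PG_LEVEL_2M) returns
 * (999 >> 9) - (0 >> 9) + 1 == 2, i.e. the slot touches two 2M-aligned regions.
 */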

static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
	atomic64_add(count, &kvm->stat.pages[level - 1]);
}

gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception);

static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
				      struct kvm_mmu *mmu,
				      gpa_t gpa, u32 access,
				      struct x86_exception *exception)
{
	if (mmu != &vcpu->arch.nested_mmu)
		return gpa;
	return translate_nested_gpa(vcpu, gpa, access, exception);
}
#endif