/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages, a 39-bit VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, so none of this applies in that case.
 */

#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)
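
/*
 * Illustrative values only: with VA_BITS = 39, HYP_PAGE_OFFSET_HIGH_MASK
 * evaluates to 0x0000007fffffffff and HYP_PAGE_OFFSET_LOW_MASK to
 * 0x0000003fffffffff; with VA_BITS = 48 they are 0x0000ffffffffffff and
 * 0x00007fffffffffff respectively. Masking a kernel VA with the high mask
 * keeps its low VA_BITS bits, and the low mask additionally clears
 * bit (VA_BITS - 1), selecting the bottom half of the EL2 VA space.
 */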

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * This generates the following sequences:
 * - High mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		nop
 * - Low mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
 * - VHE:
 *		nop
 *		nop
 *
 * The "low mask" version works because the low mask is a strict subset
 * of the high mask, so the first AND is redundant but harmless. This
 * should be completely invisible on any viable CPU.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and     \reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
alternative_else_nop_endif
alternative_if ARM64_HYP_OFFSET_LOW
	and     \reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
alternative_else_nop_endif
.endm
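
/*
 * Illustrative use from assembly (a sketch, not taken from this file;
 * "some_kernel_symbol" is a hypothetical placeholder):
 *
 *	ldr	x0, =some_kernel_symbol
 *	kern_hyp_va x0		// x0 now holds the HYP alias of that VA
 */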

#else

#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
	asm volatile(ALTERNATIVE("nop",
				 "and %0, %0, %1",
				 ARM64_HYP_OFFSET_LOW)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_LOW_MASK));
	return v;
}

#define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
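
/*
 * Worked example (illustrative, assuming VA_BITS = 39 and a non-VHE
 * system): a linear-map address such as 0xffffffc000100000 becomes
 * 0x0000004000100000 after the high mask; if ARM64_HYP_OFFSET_LOW is
 * set, the low mask additionally clears bit 38, giving 0x0000000000100000.
 * Under VHE both alternatives are patched to NOPs and the VA is
 * returned unchanged.
 */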

/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
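
/*
 * Derived values, for reference only: KVM_PHYS_SIZE is 1UL << 40 ==
 * 0x10000000000, i.e. a 1TB guest physical address space, and
 * KVM_PHYS_MASK is 0xffffffffff.
 */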

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
	pte_val(pte) &= ~PTE_S2_XN;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_S2_XN;
	return pmd;
}

/*
 * Drop the stage-2 write permission with a cmpxchg loop, so that any
 * bits the hardware may update concurrently (such as the access flag)
 * are not lost.
 */
static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pteval_t old_pteval, pteval;

	pteval = READ_ONCE(pte_val(*pte));
	do {
		old_pteval = pteval;
		pteval &= ~PTE_S2_RDWR;
		pteval |= PTE_S2_RDONLY;
		pteval = cmpxchg_relaxed(&pte_val(*pte), old_pteval, pteval);
	} while (pteval != old_pteval);
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *pte)
{
	return !(pte_val(*pte) & PTE_S2_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	kvm_set_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return kvm_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_s2pmd_exec(pmd_t *pmd)
{
	return !(pmd_val(*pmd) & PMD_S2_XN);
}

/*
 * A page table page is considered empty when its refcount has dropped
 * back to one, i.e. no live entries hold a reference to it any more.
 */
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* SCTLR_EL1.M (bit 0) and SCTLR_EL1.C (bit 2) must both be set */
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__pa_symbol(x)

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap_level();
}

static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
	return idmap_ptrs_per_pgd;
}

/*
 * Can't use pgd_populate here, because the extended idmap adds an extra level
 * above CONFIG_PGTABLE_LEVELS (which is 2 or 3 if we're using the extended
 * idmap), and pgd_populate is only available if CONFIG_PGTABLE_LEVELS = 4.
 */
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;
	u64 pgd_addr;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
	merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	pgd_addr = __phys_to_pgd_val(__pa(boot_hyp_pgd));
	merged_hyp_pgd[idmap_idx] = __pgd(pgd_addr | PMD_TYPE_TABLE);
}

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	/* ID_AA64MMFR1_EL1.VMIDBits == 2 means 16-bit VMIDs are implemented */
	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu.h>

static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kvm_ksym_ref(__kvm_hyp_vector);

	/* If this CPU has a BP hardening callback, use its hardened vector slot */
	if (data->fn) {
		vect = __bp_harden_hyp_vecs_start +
		       data->hyp_vectors_slot * SZ_2K;

		if (!has_vhe())
			vect = lm_alias(vect);
	}

	return vect;
}

static inline int kvm_map_vectors(void)
{
	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
				   PAGE_HYP_EXEC);
}

#else
static inline void *kvm_get_hyp_vector(void)
{
	return kvm_ksym_ref(__kvm_hyp_vector);
}

static inline int kvm_map_vectors(void)
{
	return 0;
}
#endif

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */