/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>

/*
 * As we only have the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
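/*
 * Worked example, assuming a 4K-page, VA_BITS == 39 configuration
 * (the exact values depend on the kernel config):
 *	PAGE_OFFSET          == 0xffffffc000000000
 *	HYP_PAGE_OFFSET_MASK == 0x0000007fffffffff
 *	HYP_PAGE_OFFSET      == 0x0000004000000000
 * i.e. the HYP alias of a kernel VA is the same address with the
 * upper "all ones" bits stripped off.
 */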

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)
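/*
 * With the VA_BITS == 39 / 4K configuration assumed above, this is
 * 0x0000007ffffff000: the last 4K page of the HYP VA range.
 */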

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels in addition to the PGD, and potentially the PUD, which are
 * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
 * tables use one level of tables less than the kernel).
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define KVM_MMU_CACHE_MIN_PAGES	1
#else
#define KVM_MMU_CACHE_MIN_PAGES	2
#endif
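/*
 * A sketch of where these numbers come from (assuming the 40-bit IPA
 * below): with 4K pages, the stage-2 walk can still need a PMD and a
 * PTE table allocated on demand for a single fault, hence 2 pages;
 * with 64K pages only one further level (the PTE table) remains below
 * the pre-allocated tables, hence 1 page.
 */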

#ifdef __ASSEMBLY__

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
.endm
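/*
 * Illustrative use only (some_kernel_symbol is a placeholder):
 *	ldr	x0, =some_kernel_symbol
 *	kern_hyp_va x0			// x0 now holds the HYP alias
 */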

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>

#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
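/*
 * Because HYP_PAGE_OFFSET == (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK), this
 * offset form computes the same result as the kern_hyp_va masking above
 * for any address in the kernel linear map, but is usable on the C side.
 */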

/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
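/* i.e. a guest physical address space of 2^40 bytes (1TB). */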

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)	set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)	set_pmd(pmdp, pmd)

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= PMD_S2_RDWR;
}

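/*
 * The read-only helpers below rely on the stage-2 access permission
 * field S2AP in descriptor bits [7:6]: 01 is read-only, 11 is
 * read/write. Clearing the RDWR bits and OR-ing in RDONLY is therefore
 * enough to downgrade a mapping.
 */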
static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
}

#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)

/*
 * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, we can address
 * the entire IPA input range with a single pgd entry. Note that in this case,
 * the pgd is actually not used by the MMU for Stage-2 translations, but is
 * merely a fake pgd used as a data structure for the kernel pgtable macros to
 * work.
 */
#if PGDIR_SHIFT > KVM_PHYS_SHIFT
#define PTRS_PER_S2_PGD_SHIFT	0
#else
#define PTRS_PER_S2_PGD_SHIFT	(KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)
#define S2_PGD_ORDER		get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
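/*
 * Worked examples, sketched for KVM_PHYS_SHIFT == 40 (values depend on
 * the configuration):
 *	4K pages, 4 levels:  PGDIR_SHIFT == 39, PTRS_PER_S2_PGD == 2
 *	4K pages, 3 levels:  PGDIR_SHIFT == 30, PTRS_PER_S2_PGD == 1024
 *	64K pages, 3 levels: PGDIR_SHIFT == 42 > 40, PTRS_PER_S2_PGD == 1
 */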

/*
 * If we are concatenating first level stage-2 page tables, we would have
 * at most 16 pointers in the fake PGD, because that's what the architecture
 * allows. In this case, (4 - CONFIG_ARM64_PGTABLE_LEVELS) represents the
 * first level for the host, and we add 1 to go to the next level (which
 * uses concatenation) for the stage-2 tables.
 */
#if PTRS_PER_S2_PGD <= 16
#define KVM_PREALLOC_LEVEL	(4 - CONFIG_ARM64_PGTABLE_LEVELS + 1)
#else
#define KVM_PREALLOC_LEVEL	(0)
#endif
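/*
 * For instance (again assuming KVM_PHYS_SHIFT == 40): with 4K pages and
 * 4 levels, PTRS_PER_S2_PGD == 2 <= 16, so KVM_PREALLOC_LEVEL == 1 and
 * the PUD level is pre-allocated; with 64K pages and 3 levels,
 * KVM_PREALLOC_LEVEL == 2 and the PMD level is pre-allocated; with 4K
 * pages and 3 levels, PTRS_PER_S2_PGD == 1024 > 16, so nothing is
 * pre-allocated (KVM_PREALLOC_LEVEL == 0).
 */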

/**
 * kvm_prealloc_hwpgd - allocate initial table for VTTBR
 * @kvm:	The KVM struct pointer for the VM.
 * @pgd:	The kernel pseudo pgd
 *
 * When the kernel uses more levels of page tables than the guest, we allocate
 * a fake PGD and pre-populate it to point to the next-level page table, which
 * will be the real initial page table pointed to by the VTTBR.
 *
 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
 * the kernel will use a folded pud. When KVM_PREALLOC_LEVEL==1, we
 * allocate 2 consecutive PUD pages.
 */
static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
{
	unsigned int i;
	unsigned long hwpgd;

	if (KVM_PREALLOC_LEVEL == 0)
		return 0;

	hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
	if (!hwpgd)
		return -ENOMEM;

	for (i = 0; i < PTRS_PER_S2_PGD; i++) {
		if (KVM_PREALLOC_LEVEL == 1)
			pgd_populate(NULL, pgd + i,
				     (pud_t *)hwpgd + i * PTRS_PER_PUD);
		else if (KVM_PREALLOC_LEVEL == 2)
			pud_populate(NULL, pud_offset(pgd, 0) + i,
				     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
	}

	return 0;
}

static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	pgd_t *pgd = kvm->arch.pgd;
	pud_t *pud;

	if (KVM_PREALLOC_LEVEL == 0)
		return pgd;

	pud = pud_offset(pgd, 0);
	if (KVM_PREALLOC_LEVEL == 1)
		return pud;

	BUG_ON(KVM_PREALLOC_LEVEL != 2);
	return pmd_offset(pud, 0);
}

static inline void kvm_free_hwpgd(struct kvm *kvm)
{
	if (KVM_PREALLOC_LEVEL > 0) {
		unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
		free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
	}
}

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}
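/*
 * A sketch of why this works: the stage-2 code takes a page reference
 * for each entry it installs in a table page, so a count of 1 (the
 * bare allocation) means the table holds no entries.
 */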

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define kvm_pmd_table_empty(kvm, pmdp) (0)
#else
#define kvm_pmd_table_empty(kvm, pmdp) \
	(kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define kvm_pud_table_empty(kvm, pudp) (0)
#else
#define kvm_pud_table_empty(kvm, pudp) \
	(kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

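/*
 * SCTLR_EL1 bit 0 is M (MMU enable) and bit 2 is C (data cache enable);
 * the guest's caches count as on only when both are set, hence the
 * 0b101 mask below.
 */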
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
					     unsigned long size,
					     bool ipa_uncached)
{
	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc((void *)hva, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range(hva, hva + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void stage2_flush_vm(struct kvm *kvm);

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */