/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 *  T = __pa_symbol(__hyp_idmap_text_start)
 *  if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 *  else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 *  HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */

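/*
 * Worked example (illustrative): with VA_BITS == 48, the test above
 * checks bit 47 of the idmap's physical address. If it is set, the
 * idmap lives in the upper half and HYP gets [0, 2^47); otherwise HYP
 * gets the top half, [2^47, 2^48).
 */
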
#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va reg
alternative_cb kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm
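
/*
 * Sketch only (not the literal patch): with no tag randomization,
 * kvm_update_va_mask is expected to rewrite the first instruction into
 * an AND with the real va_mask, e.g.
 *
 *	and x0, x0, #0x00007fffffffffff		// 47-bit mask example
 *
 * and to turn the remaining four instructions into NOPs; with a random
 * tag, the ror/add/add/ror sequence is patched to insert the tag bits.
 */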

/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */
.macro hyp_pa reg, tmp
	ldr_l	\tmp, hyp_physvirt_offset
	add	\reg, \reg, \tmp
.endm
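
/* In effect, hyp_pa computes PA = hyp VA + hyp_physvirt_offset, in place. */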

/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro hyp_kimg_va reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp

	/* Load kimage_voffset. */
alternative_cb kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimg VA. */
	add	\reg, \reg, \tmp
.endm
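
/*
 * Net effect (sketch): kimg VA = hyp VA + hyp_physvirt_offset + kimage_voffset.
 * The movz/movk placeholders are patched at boot to load the 64-bit
 * kimage_voffset constant 16 bits at a time.
 */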

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);

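/* The C-side counterpart of the hyp_pa assembly macro above. */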
#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))

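/*
 * Usage sketch (hypothetical variable): translate a kernel pointer
 * before handing it to code running at EL2, e.g.
 *
 *	struct kvm_s2_mmu *hyp_mmu = kern_hyp_va(mmu);
 *
 * The typeof() in kern_hyp_va() preserves the pointer type across the
 * unsigned long round-trip.
 */
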
/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
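
/*
 * Example: with the default 40-bit IPA space, kvm_phys_size() is
 * 1ULL << 40 (1 TiB) and kvm_phys_mask() covers IPA bits [39:0].
 */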

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(u32 *hyp_va_bits);

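/*
 * Note on the arithmetic below: vector slots are SZ_2K each, and every
 * slot other than HYP_VECTOR_DIRECT is shifted down by one, so the
 * vector enum value immediately after HYP_VECTOR_DIRECT shares slot 0
 * with it.
 */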
static inline void *__kvm_vector_slot2addr(void *base,
					   enum arm64_hyp_spectre_vector slot)
{
	int idx = slot - (slot != HYP_VECTOR_DIRECT);

	return base + (idx * SZ_2K);
}

struct kvm;

#define kvm_flush_dcache_to_poc(a, l)	\
	dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a) + (l))

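/*
 * SCTLR_EL1.M (bit 0) enables the stage 1 MMU and SCTLR_EL1.C (bit 2)
 * enables data caching; 0b101 below checks that both are set.
 */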
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(void *va, size_t size)
{
	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		icache_inval_all_pou();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * When this is (directly or indirectly) used on the TLB invalidation
 * path, we rely on a previously issued DSB so that page table updates
 * and VMID reads are correctly ordered.
 */
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = (u64)READ_ONCE(vmid->vmid) << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}

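/*
 * For reference: the VTTBR_EL2 value built above combines the stage-2
 * PGD base address in the lower bits, the VMID at VTTBR_VMID_SHIFT
 * (bits [63:48] for 16-bit VMIDs), and CnP in bit 0 when supported.
 */
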
/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	write_sysreg(arch->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
{
	return container_of(mmu->arch, struct kvm, arch);
}
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */