/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages, 39bits VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */

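/*
 * Worked example (illustrative, derived from the pseudocode above):
 * with VA_BITS = 39 and __hyp_idmap_text_start at physical address
 * 0x40080000, T & BIT(38) == 0, so the idmap sits in the bottom half
 * and HYP takes the top half: HYP_VA_MIN = 1 << 38,
 * HYP_VA_MAX = (1 << 39) - 1.
 */
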
#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
alternative_cb kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm
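
/*
 * Illustrative use (an assumption, not taken from this file): convert
 * a kernel VA already loaded in x0 before programming an EL2 register:
 *
 *	kern_hyp_va	x0
 */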

#else

#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))

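/*
 * Example (illustrative): kernel pointers handed to code running at
 * EL2 must be converted first, e.g.
 *
 *	struct kvm *hyp_kvm = kern_hyp_va(vcpu->kvm);
 *
 * The typeof() cast keeps the conversion transparent to the pointer
 * type.
 */
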
/*
 * Obtain the PC-relative address of a kernel symbol
 * s: symbol
 *
 * The goal of this macro is to return a symbol's address based on a
 * PC-relative computation, as opposed to loading the VA from a
 * constant pool or something similar. This works well for HYP, as an
 * absolute VA is guaranteed to be wrong. Only use this if trying to
 * obtain the address of a symbol (i.e. not something you obtained by
 * following a pointer).
 */
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr;					\
		asm("adrp	%0, %1\n"				\
		    "add	%0, %0, :lo12:%1\n"			\
		    : "=r" (addr) : "S" (&s));				\
		addr;							\
	})

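/*
 * Example (illustrative, hypothetical symbol name):
 *
 *	unsigned long *p = hyp_symbol_addr(__my_hyp_data);
 *
 * computes the address with adrp/add relative to the current PC,
 * never through a constant pool entry holding a kernel VA.
 */
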
/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40bits.
 */
#define KVM_PHYS_SHIFT	(40)

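/*
 * Example (illustrative): with the default 40bit IPA, the macros
 * below give kvm_phys_size() = 1ULL << 40 (1TB of guest PA space)
 * and kvm_phys_mask() = 0xffffffffff.
 */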
#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))

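/*
 * A page-table page is deemed empty when only the initial allocation
 * reference remains: the MMU code elsewhere takes a page reference
 * per live entry, so page_count() == 1 means the table holds no
 * entries and can be freed.
 */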
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_mk_pmd(ptep)					\
	__pmd(__phys_to_pmd_val(__pa(ptep)) | PMD_TYPE_TABLE)
#define kvm_mk_pud(pmdp)					\
	__pud(__phys_to_pud_val(__pa(pmdp)) | PMD_TYPE_TABLE)
#define kvm_mk_pgd(pudp)					\
	__pgd(__phys_to_pgd_val(__pa(pudp)) | PUD_TYPE_TABLE)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
	pte_val(pte) &= ~PTE_S2_XN;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_S2_XN;
	return pmd;
}

static inline void kvm_set_s2pte_readonly(pte_t *ptep)
{
	pteval_t old_pteval, pteval;

	pteval = READ_ONCE(pte_val(*ptep));
	do {
		old_pteval = pteval;
		pteval &= ~PTE_S2_RDWR;
		pteval |= PTE_S2_RDONLY;
		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
	} while (pteval != old_pteval);
}

static inline bool kvm_s2pte_readonly(pte_t *ptep)
{
	return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *ptep)
{
	return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
{
	kvm_set_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
{
	return kvm_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
{
	return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

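/*
 * SCTLR_EL1.M (bit 0) enables the stage 1 MMU, SCTLR_EL1.C (bit 2)
 * enables data cacheability: 0b101 checks that the guest runs with
 * both its MMU and its D-cache on.
 */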
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

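/*
 * Stage-2 teardown helpers: clean the memory covered by a leaf entry
 * to the PoC, unless FWB already guarantees cacheable guest accesses
 * (see the comment in __clean_dcache_guest_page() above).
 */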
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pte_page(pte);
		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
	}
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pmd_page(pmd);
		kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pud_page(pud);
		kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
	}
}

#define kvm_virt_to_phys(x)		__pa_symbol(x)

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap_level();
}

static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
	return idmap_ptrs_per_pgd;
}

/*
 * Can't use pgd_populate here, because the extended idmap adds an extra level
 * above CONFIG_PGTABLE_LEVELS (which is 2 or 3 if we're using the extended
 * idmap), and pgd_populate is only available if CONFIG_PGTABLE_LEVELS = 4.
 */
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;
	u64 pgd_addr;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
	merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	pgd_addr = __phys_to_pgd_val(__pa(boot_hyp_pgd));
	merged_hyp_pgd[idmap_idx] = __pgd(pgd_addr | PMD_TYPE_TABLE);
}

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
 * EL2 vectors can be mapped and rerouted in a number of ways,
 * depending on the kernel configuration and CPU present:
 *
 * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
 *   hardening sequence is placed in one of the vector slots, which is
 *   executed before jumping to the real vectors.
 *
 * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
 *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
 *   hardening sequence is mapped next to the idmap page, and executed
 *   before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
 *   executed before jumping to the real vectors.
 *
 * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
 * VHE, as we don't have hypervisor-specific mappings. If the system
 * is VHE and yet selects this capability, it will be ignored.
 */
#include <asm/mmu.h>

extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;

static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	int slot = -1;

	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
		slot = data->hyp_vectors_slot;
	}

	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
		vect = __kvm_bp_vect_base;
		if (slot == -1)
			slot = __kvm_harden_el2_vector_slot;
	}

	if (slot != -1)
		vect += slot * SZ_2K;

	return vect;
}

/* This is only called on a !VHE system */
static inline int kvm_map_vectors(void)
{
	/*
	 * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
	 *
	 * !HBP + !HEL2 -> use direct vectors
	 *  HBP + !HEL2 -> use hardened vectors in place
	 * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
	 *  HBP +  HEL2 -> use hardened vectors and use exec mapping
	 */
	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
	}

	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs_start);
		unsigned long size = (__bp_harden_hyp_vecs_end -
				      __bp_harden_hyp_vecs_start);

		/*
		 * Always allocate a spare vector slot, as we don't
		 * know yet which CPUs have a BP hardening slot that
		 * we can reuse.
		 */
		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
		return create_hyp_exec_mappings(vect_pa, size,
						&__kvm_bp_vect_base);
	}

	return 0;
}
#else
static inline void *kvm_get_hyp_vector(void)
{
	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
}

static inline int kvm_map_vectors(void)
{
	return 0;
}
#endif

#ifdef CONFIG_ARM64_SSBD
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

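/*
 * Map each CPU's arm64_ssbd_callback_required flag into HYP, so the
 * EL2 SSBD mitigation code can read it.
 */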
static inline int hyp_map_aux_data(void)
{
	int cpu, err;

	for_each_possible_cpu(cpu) {
		u64 *ptr;

		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
		if (err)
			return err;
	}
	return 0;
}
#else
static inline int hyp_map_aux_data(void)
{
	return 0;
}
#endif

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * Get the magic number 'x' for VTTBR:BADDR of this KVM instance.
 * With v8.2 LVA extensions, 'x' should be a minimum of 6 with
 * 52bit IPS.
 */
static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels)
{
	int x = ARM64_VTTBR_X(ipa_shift, levels);

	return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 6 : x;
}

static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels)
{
	unsigned int x = arm64_vttbr_x(ipa_shift, levels);

	return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x);
}

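/*
 * Worked example (illustrative, assuming 4k pages and a 48bit
 * PHYS_MASK_SHIFT): a 40bit IPA with 3 levels starts the walk at
 * level 1 with two concatenated tables, so the pgd must be 8k
 * aligned: arm64_vttbr_x(40, 3) = 13 and the mask is
 * GENMASK_ULL(47, 13).
 */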
static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
{
	return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */