/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages, 39bits VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */
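
/*
 * Worked instance of the rule above (illustrative only): assuming
 * VA_BITS == 48 and a trampoline page whose physical address has
 * bit 47 clear (T & BIT(47) == 0), HYP gets the top half:
 *
 *	HYP_VA_MIN = 1UL << 47				// 0x0000800000000000
 *	HYP_VA_MAX = HYP_VA_MIN + (1UL << 47) - 1	// 0x0000ffffffffffff
 */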

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
alternative_cb kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
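
/*
 * C counterpart of the kern_hyp_va assembly macro above: the five
 * instructions are only placeholders, and get rewritten at boot by
 * kvm_update_va_mask with the real mask and tag values.
 */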
static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
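
/*
 * Typical use: any kernel pointer handed to code running at EL2 must
 * be converted first, as kvm_get_hyp_vector() does below with
 * kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector)).
 */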

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
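
/*
 * For example, with the default 40-bit IPA size, kvm_phys_size() is
 * 1ULL << 40 (1TB of guest physical address space) and kvm_phys_mask()
 * is 0xffffffffff.
 */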
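
/*
 * A page-table page is considered empty when only the allocator's
 * reference to it remains: the table management code takes a page
 * reference per valid entry, so a refcount of 1 means no entries left.
 */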
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_mk_pmd(ptep)					\
	__pmd(__phys_to_pmd_val(__pa(ptep)) | PMD_TYPE_TABLE)
#define kvm_mk_pud(pmdp)					\
	__pud(__phys_to_pud_val(__pa(pmdp)) | PMD_TYPE_TABLE)
#define kvm_mk_p4d(pmdp)					\
	__p4d(__phys_to_p4d_val(__pa(pmdp)) | PUD_TYPE_TABLE)

#define kvm_set_pud(pudp, pud)		set_pud(pudp, pud)

#define kvm_pfn_pte(pfn, prot)		pfn_pte(pfn, prot)
#define kvm_pfn_pmd(pfn, prot)		pfn_pmd(pfn, prot)
#define kvm_pfn_pud(pfn, prot)		pfn_pud(pfn, prot)

#define kvm_pud_pfn(pud)		pud_pfn(pud)

#define kvm_pmd_mkhuge(pmd)		pmd_mkhuge(pmd)
#define kvm_pud_mkhuge(pud)		pud_mkhuge(pud)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= PUD_S2_RDWR;
	return pud;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
	pte_val(pte) &= ~PTE_S2_XN;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_S2_XN;
	return pmd;
}

static inline pud_t kvm_s2pud_mkexec(pud_t pud)
{
	pud_val(pud) &= ~PUD_S2_XN;
	return pud;
}
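
/*
 * Downgrade a stage-2 PTE to read-only with a lock-free
 * read-modify-write loop: retry the cmpxchg until no concurrent update
 * of the PTE (e.g. by hardware dirty-bit management) races with us.
 */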
static inline void kvm_set_s2pte_readonly(pte_t *ptep)
{
	pteval_t old_pteval, pteval;

	pteval = READ_ONCE(pte_val(*ptep));
	do {
		old_pteval = pteval;
		pteval &= ~PTE_S2_RDWR;
		pteval |= PTE_S2_RDONLY;
		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
	} while (pteval != old_pteval);
}

static inline bool kvm_s2pte_readonly(pte_t *ptep)
{
	return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *ptep)
{
	return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
{
	kvm_set_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
{
	return kvm_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
{
	return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
}

static inline void kvm_set_s2pud_readonly(pud_t *pudp)
{
	kvm_set_s2pte_readonly((pte_t *)pudp);
}

static inline bool kvm_s2pud_readonly(pud_t *pudp)
{
	return kvm_s2pte_readonly((pte_t *)pudp);
}

static inline bool kvm_s2pud_exec(pud_t *pudp)
{
	return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN);
}

static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
{
	return pud_mkyoung(pud);
}

static inline bool kvm_s2pud_young(pud_t pud)
{
	return pud_young(pud);
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

#ifdef __PAGETABLE_P4D_FOLDED
#define hyp_p4d_table_empty(p4dp) (0)
#else
#define hyp_p4d_table_empty(p4dp) kvm_page_empty(p4dp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
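
/*
 * The guest is only considered "cached" if both the MMU (SCTLR_EL1.M,
 * bit 0) and the data cache (SCTLR_EL1.C, bit 2) are enabled.
 */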
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pte_page(pte);
		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
	}
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pmd_page(pmd);
		kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pud_page(pud);
		kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap_level();
}

static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
	return idmap_ptrs_per_pgd;
}

/*
 * Can't use pgd_populate here, because the extended idmap adds an extra level
 * above CONFIG_PGTABLE_LEVELS (which is 2 or 3 if we're using the extended
 * idmap), and pgd_populate is only available if CONFIG_PGTABLE_LEVELS = 4.
 */
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;
	u64 pgd_addr;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
	merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	pgd_addr = __phys_to_pgd_val(__pa(boot_hyp_pgd));
	merged_hyp_pgd[idmap_idx] = __pgd(pgd_addr | PMD_TYPE_TABLE);
}

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
 * EL2 vectors can be mapped and rerouted in a number of ways,
 * depending on the kernel configuration and CPU present:
 *
 * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
 *   hardening sequence is placed in one of the vector slots, which is
 *   executed before jumping to the real vectors.
 *
 * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
 *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
 *   hardening sequence is mapped next to the idmap page, and executed
 *   before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
 *   executed before jumping to the real vectors.
 *
 * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
 * VHE, as we don't have hypervisor-specific mappings. If the system
 * is VHE and yet selects this capability, it will be ignored.
 */
#include <asm/mmu.h>

extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;

/* This is called on both VHE and !VHE systems */
static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	int slot = -1;

	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
		slot = data->hyp_vectors_slot;
	}

	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
		vect = __kvm_bp_vect_base;
		if (slot == -1)
			slot = __kvm_harden_el2_vector_slot;
	}

	if (slot != -1)
		vect += slot * SZ_2K;

	return vect;
}

/* This is only called on a !VHE system */
static inline int kvm_map_vectors(void)
{
	/*
	 * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
	 *
	 * !HBP + !HEL2 -> use direct vectors
	 *  HBP + !HEL2 -> use hardened vectors in place
	 * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
	 *  HBP +  HEL2 -> use hardened vectors and use exec mapping
	 */
	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
	}

	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;

		/*
		 * Always allocate a spare vector slot, as we don't
		 * know yet which CPUs have a BP hardening slot that
		 * we can reuse.
		 */
		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
		return create_hyp_exec_mappings(vect_pa, size,
						&__kvm_bp_vect_base);
	}

	return 0;
}
#else
static inline void *kvm_get_hyp_vector(void)
{
	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
}

static inline int kvm_map_vectors(void)
{
	return 0;
}
#endif

#ifdef CONFIG_ARM64_SSBD
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
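
/*
 * Map each CPU's copy of arm64_ssbd_callback_required into the HYP
 * address space, so that the EL2 vectors can consult it when applying
 * the SSBD mitigation.
 */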
static inline int hyp_map_aux_data(void)
{
	int cpu, err;

	for_each_possible_cpu(cpu) {
		u64 *ptr;

		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
		if (err)
			return err;
	}
	return 0;
}
#else
static inline int hyp_map_aux_data(void)
{
	return 0;
}
#endif

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * Get the magic number 'x' for VTTBR:BADDR of this KVM instance.
 * With v8.2 LVA extensions, 'x' should be a minimum of 6 with
 * 52bit IPS.
 */
static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels)
{
	int x = ARM64_VTTBR_X(ipa_shift, levels);

	return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 6 : x;
}
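
/*
 * Worked example (assuming 4K pages and that ARM64_VTTBR_X(ipa, levels)
 * expands to ipa - levels * (PAGE_SHIFT - 3)): a 40-bit IPA with 3
 * levels of stage-2 translation gives x = 40 - 3 * 9 = 13, so
 * vttbr_baddr_mask() below covers VTTBR bits [PHYS_MASK_SHIFT - 1:13].
 */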
static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels)
{
	unsigned int x = arm64_vttbr_x(ipa_shift, levels);

	return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x);
}

static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
{
	return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
}
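
/*
 * Compose the VTTBR_EL2 value for this VM: the stage-2 PGD physical
 * address in BADDR, the VMID in the top bits, plus the CnP hint bit
 * when the CPU supports Common-not-Private translations.
 */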
static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
{
	struct kvm_vmid *vmid = &kvm->arch.vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = kvm->arch.pgd_phys;
	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_guest_stage2(struct kvm *kvm)
{
	write_sysreg(kvm->arch.vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */