/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>

/*
 * As we only have the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
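
/*
 * Worked example (VA_BITS == 39 assumed purely for illustration):
 * HYP_PAGE_OFFSET_MASK is then 0x0000007fffffffff, so a kernel VA such
 * as 0xffffffc000080000 (all top bits set) masks down to the HYP VA
 * 0x0000004000080000, which fits in TTBR0_EL2's positive address range.
 */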

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels in addition to the PGD and potentially the PUD which are
 * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
 * tables use one fewer level of tables than the kernel).
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define KVM_MMU_CACHE_MIN_PAGES	1
#else
#define KVM_MMU_CACHE_MIN_PAGES	2
#endif
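
/*
 * Illustrative numbers (configurations assumed, not stated in this
 * file): with 64K pages and a 40-bit IPA, Stage-2 resolves in two
 * levels, so only one table level sits below the pre-allocated PGD;
 * with 4K pages two further levels (PMD and PTE tables) may have to be
 * allocated on a fault, hence the two values above.
 */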

#ifdef __ASSEMBLY__

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
.endm
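
/*
 * Hypothetical usage sketch (the symbol and register choice are ours,
 * not mandated by this file):
 *
 *	ldr	x0, =some_kernel_symbol
 *	kern_hyp_va	x0		// x0 now holds the HYP alias
 */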

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>

#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)

/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
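/* i.e. 1UL << 40 bytes: each guest is limited to 1TB of IPA space. */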

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

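/*
 * Hardware page-table walks snoop the caches on arm64, so the cache
 * cleaning that the 32-bit ARM port performs for these hooks is not
 * needed; the helpers below are deliberately no-ops.
 */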
static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= PMD_S2_RDWR;
}

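/*
 * The helpers below toggle Stage-2 entries between read/write and
 * read-only by rewriting the S2AP permission field; KVM uses this to
 * write-protect pages when logging dirty memory (e.g. for migration).
 */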
static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
}

#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)

/*
 * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, we can
 * address the entire IPA input range with a single pgd entry. Note that in
 * this case, the pgd is actually not used by the MMU for Stage-2
 * translations, but is merely a fake pgd used as a data structure for the
 * kernel pgtable macros to work.
 */
#if PGDIR_SHIFT > KVM_PHYS_SHIFT
#define PTRS_PER_S2_PGD_SHIFT	0
#else
#define PTRS_PER_S2_PGD_SHIFT	(KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)
#define S2_PGD_ORDER		get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
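
/*
 * Worked example under assumed configurations (not part of this file):
 * with 4K pages and 4 translation levels, PGDIR_SHIFT == 39, so the
 * fake PGD has 1 << (40 - 39) = 2 entries; with 4K pages and 3 levels,
 * PGDIR_SHIFT == 30, giving 1024 entries, i.e. 8KB of pgd_t and
 * S2_PGD_ORDER == 1.
 */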

/*
 * If we are concatenating first level stage-2 page tables, we would have
 * less than or equal to 16 pointers in the fake PGD, because that is the
 * maximum number of concatenated tables the architecture allows. In this
 * case, (4 - CONFIG_ARM64_PGTABLE_LEVELS) represents the first level for
 * the host, and we add 1 to go to the next level (which uses
 * concatenation) for the stage-2 tables.
 */
#if PTRS_PER_S2_PGD <= 16
#define KVM_PREALLOC_LEVEL	(4 - CONFIG_ARM64_PGTABLE_LEVELS + 1)
#else
#define KVM_PREALLOC_LEVEL	(0)
#endif
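
/*
 * Illustrative values (configurations again assumed for the example):
 * 4K pages with 4 levels yield PTRS_PER_S2_PGD == 2, so
 * KVM_PREALLOC_LEVEL == 1 and two consecutive PUD pages get
 * pre-allocated; 64K pages with 3 levels put PGDIR_SHIFT (42) above
 * KVM_PHYS_SHIFT, so PTRS_PER_S2_PGD == 1, KVM_PREALLOC_LEVEL == 2,
 * and a single PMD page is pre-allocated.
 */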

/**
 * kvm_prealloc_hwpgd - allocate initial table for VTTBR
 * @kvm:	The KVM struct pointer for the VM.
 * @pgd:	The kernel pseudo pgd
 *
 * When the kernel uses more levels of page tables than the guest, we allocate
 * a fake PGD and pre-populate it to point to the next-level page table, which
 * will be the real initial page table pointed to by the VTTBR.
 *
 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
 * the kernel will use folded PUDs. When KVM_PREALLOC_LEVEL==1, we
 * allocate 2 consecutive PUD pages.
 */
static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
{
	unsigned int i;
	unsigned long hwpgd;

	if (KVM_PREALLOC_LEVEL == 0)
		return 0;

	hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
	if (!hwpgd)
		return -ENOMEM;

	for (i = 0; i < PTRS_PER_S2_PGD; i++) {
		if (KVM_PREALLOC_LEVEL == 1)
			pgd_populate(NULL, pgd + i,
				     (pud_t *)hwpgd + i * PTRS_PER_PUD);
		else if (KVM_PREALLOC_LEVEL == 2)
			pud_populate(NULL, pud_offset(pgd, 0) + i,
				     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
	}

	return 0;
}

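/*
 * kvm_get_hwpgd() returns the table the hardware actually walks (the
 * VTTBR target): the pgd itself when nothing was pre-allocated,
 * otherwise the pre-allocated PUD or PMD level underneath the fake pgd.
 */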
static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	pgd_t *pgd = kvm->arch.pgd;
	pud_t *pud;

	if (KVM_PREALLOC_LEVEL == 0)
		return pgd;

	pud = pud_offset(pgd, 0);
	if (KVM_PREALLOC_LEVEL == 1)
		return pud;

	BUG_ON(KVM_PREALLOC_LEVEL != 2);
	return pmd_offset(pud, 0);
}

static inline void kvm_free_hwpgd(struct kvm *kvm)
{
	if (KVM_PREALLOC_LEVEL > 0) {
		unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
		free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
	}
}

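/*
 * The stage-2 code (shared with 32-bit ARM in arch/arm/kvm/mmu.c) takes
 * a reference on a page-table page for every entry it installs and
 * drops it when the entry is cleared, so a page_count() of 1 (the
 * allocation reference alone) means the table is empty.
 */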
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define kvm_pmd_table_empty(kvm, pmdp) (0)
#else
#define kvm_pmd_table_empty(kvm, pmdp) \
	(kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define kvm_pud_table_empty(kvm, pudp) (0)
#else
#define kvm_pud_table_empty(kvm, pudp) \
	(kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* SCTLR_EL1.M (bit 0) and SCTLR_EL1.C (bit 2): MMU and D-cache on */
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

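/*
 * Keep a guest page written by the host coherent: clean the data cache
 * to the PoC if the guest runs with its caches off (or the page is
 * mapped uncached), then maintain the icache: a ranged flush for PIPT,
 * a full flush for VIPT, and nothing here for ASID-tagged VIVT.
 */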
static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
					     unsigned long size,
					     bool ipa_uncached)
{
	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc((void *)hva, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range(hva, hva + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void stage2_flush_vm(struct kvm *kvm);

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */