Christoffer Dall749cf76c2013-01-20 18:28:06 -05001/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
Christoffer Dall342cd0a2013-01-20 18:28:06 -050018
19#include <linux/mman.h>
20#include <linux/kvm_host.h>
21#include <linux/io.h>
Christoffer Dallad361f02012-11-01 17:14:45 +010022#include <linux/hugetlb.h>
James Morse196f8782017-06-20 17:11:48 +010023#include <linux/sched/signal.h>
Christoffer Dall45e96ea2013-01-20 18:43:58 -050024#include <trace/events/kvm.h>
Christoffer Dall342cd0a2013-01-20 18:28:06 -050025#include <asm/pgalloc.h>
Christoffer Dall94f8e642013-01-20 18:28:12 -050026#include <asm/cacheflush.h>
Christoffer Dall342cd0a2013-01-20 18:28:06 -050027#include <asm/kvm_arm.h>
28#include <asm/kvm_mmu.h>
Christoffer Dall45e96ea2013-01-20 18:43:58 -050029#include <asm/kvm_mmio.h>
James Morse0db5e022019-01-29 18:48:49 +000030#include <asm/kvm_ras.h>
Christoffer Dalld5d81842013-01-20 18:28:07 -050031#include <asm/kvm_asm.h>
Christoffer Dall94f8e642013-01-20 18:28:12 -050032#include <asm/kvm_emulate.h>
Marc Zyngier1e947ba2015-01-29 11:59:54 +000033#include <asm/virt.h>
Christoffer Dalld5d81842013-01-20 18:28:07 -050034
35#include "trace.h"
Christoffer Dall342cd0a2013-01-20 18:28:06 -050036
Marc Zyngier5a677ce2013-04-12 19:12:06 +010037static pgd_t *boot_hyp_pgd;
Marc Zyngier2fb41052013-04-12 19:12:03 +010038static pgd_t *hyp_pgd;
Ard Biesheuvele4c5a682015-03-19 16:42:28 +000039static pgd_t *merged_hyp_pgd;
Christoffer Dall342cd0a2013-01-20 18:28:06 -050040static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
41
Marc Zyngier5a677ce2013-04-12 19:12:06 +010042static unsigned long hyp_idmap_start;
43static unsigned long hyp_idmap_end;
44static phys_addr_t hyp_idmap_vector;
45
Marc Zyngiere3f019b2017-12-04 17:04:38 +000046static unsigned long io_map_base;
47
Christoffer Dall38f791a2014-10-10 12:14:28 +020048#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
Mark Salter5d4e08c2014-03-28 14:25:19 +000049
Mario Smarduch15a49a42015-01-15 15:58:58 -080050#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
51#define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
52
53static bool memslot_is_logging(struct kvm_memory_slot *memslot)
54{
Mario Smarduch15a49a42015-01-15 15:58:58 -080055 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
Mario Smarduch72760302015-01-15 15:59:01 -080056}
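/*
 * Note: memslot->dirty_bitmap is allocated by generic KVM code once
 * userspace enables KVM_MEM_LOG_DIRTY_PAGES on the slot, so the check
 * above is true exactly for writable slots with dirty logging enabled;
 * KVM_MEM_READONLY slots are excluded because read-only memory can never
 * be dirtied and so needs no write-protection based logging.
 */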
57
58/**
59 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
60 * @kvm: pointer to kvm structure.
61 *
62 * Interface to HYP function to flush all VM TLB entries
63 */
64void kvm_flush_remote_tlbs(struct kvm *kvm)
65{
66 kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
Mario Smarduch15a49a42015-01-15 15:58:58 -080067}
Christoffer Dallad361f02012-11-01 17:14:45 +010068
Marc Zyngier48762762013-01-28 15:27:00 +000069static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
Christoffer Dalld5d81842013-01-20 18:28:07 -050070{
Suzuki K Poulose8684e702016-03-22 17:14:25 +000071 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
Christoffer Dalld5d81842013-01-20 18:28:07 -050072}
73
Marc Zyngier363ef892014-12-19 16:48:06 +000074/*
75 * D-Cache management functions. They take the page table entries by
76 * value, as they are flushing the cache using the kernel mapping (or
77 * kmap on 32bit).
78 */
79static void kvm_flush_dcache_pte(pte_t pte)
80{
81 __kvm_flush_dcache_pte(pte);
82}
83
84static void kvm_flush_dcache_pmd(pmd_t pmd)
85{
86 __kvm_flush_dcache_pmd(pmd);
87}
88
89static void kvm_flush_dcache_pud(pud_t pud)
90{
91 __kvm_flush_dcache_pud(pud);
92}
93
Ard Biesheuvele6fab542015-11-10 15:11:20 +010094static bool kvm_is_device_pfn(unsigned long pfn)
95{
96 return !pfn_valid(pfn);
97}
98
Mario Smarduch15a49a42015-01-15 15:58:58 -080099/**
100 * stage2_dissolve_pmd() - clear and flush huge PMD entry
101 * @kvm: pointer to kvm structure.
102 * @addr: IPA
103 * @pmd: pmd pointer for IPA
104 *
 105 * Function clears a PMD entry and flushes the 1st and 2nd stage TLBs for
 106 * @addr. Marks all pages in the range dirty.
107 */
108static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
109{
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +0000110 if (!pmd_thp_or_huge(*pmd))
Mario Smarduch15a49a42015-01-15 15:58:58 -0800111 return;
112
113 pmd_clear(pmd);
114 kvm_tlb_flush_vmid_ipa(kvm, addr);
115 put_page(virt_to_page(pmd));
116}
117
Punit Agrawalb8e0ba72018-12-11 17:10:41 +0000118/**
119 * stage2_dissolve_pud() - clear and flush huge PUD entry
120 * @kvm: pointer to kvm structure.
121 * @addr: IPA
 122 * @pudp: pud pointer for IPA
 123 *
 124 * Function clears a PUD entry and flushes the 1st and 2nd stage TLBs for
 125 * @addr. Marks all pages in the range dirty.
126 */
127static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
128{
129 if (!stage2_pud_huge(kvm, *pudp))
130 return;
131
132 stage2_pud_clear(kvm, pudp);
133 kvm_tlb_flush_vmid_ipa(kvm, addr);
134 put_page(virt_to_page(pudp));
135}
136
Christoffer Dalld5d81842013-01-20 18:28:07 -0500137static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
138 int min, int max)
139{
140 void *page;
141
142 BUG_ON(max > KVM_NR_MEM_OBJS);
143 if (cache->nobjs >= min)
144 return 0;
145 while (cache->nobjs < max) {
146 page = (void *)__get_free_page(PGALLOC_GFP);
147 if (!page)
148 return -ENOMEM;
149 cache->objects[cache->nobjs++] = page;
150 }
151 return 0;
152}
153
154static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
155{
156 while (mc->nobjs)
157 free_page((unsigned long)mc->objects[--mc->nobjs]);
158}
159
160static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
161{
162 void *p;
163
164 BUG_ON(!mc || !mc->nobjs);
165 p = mc->objects[--mc->nobjs];
166 return p;
167}
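/*
 * Typical usage of the three cache helpers above (a sketch modelled on
 * kvm_phys_addr_ioremap() further down in this file): top up the cache to
 * cover a worst-case table walk while sleeping is still allowed, then take
 * the mmu_lock, so the mapping code can pull pre-allocated pages with
 * mmu_memory_cache_alloc() without failing or sleeping under the lock.
 *
 *	struct kvm_mmu_memory_cache cache = { 0, };
 *
 *	ret = mmu_topup_memory_cache(&cache, kvm_mmu_cache_min_pages(kvm),
 *				     KVM_NR_MEM_OBJS);
 *	if (ret)
 *		goto out;
 *	spin_lock(&kvm->mmu_lock);
 *	ret = stage2_set_pte(kvm, &cache, addr, &pte,
 *			     KVM_S2PTE_FLAG_IS_IOMAP);
 *	spin_unlock(&kvm->mmu_lock);
 *	...
 * out:
 *	mmu_free_memory_cache(&cache);
 */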
168
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000169static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
Marc Zyngier979acd52013-08-06 13:05:48 +0100170{
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100171 pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
172 stage2_pgd_clear(kvm, pgd);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200173 kvm_tlb_flush_vmid_ipa(kvm, addr);
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100174 stage2_pud_free(kvm, pud_table);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200175 put_page(virt_to_page(pgd));
Marc Zyngier979acd52013-08-06 13:05:48 +0100176}
177
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000178static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500179{
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100180 pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
181 VM_BUG_ON(stage2_pud_huge(kvm, *pud));
182 stage2_pud_clear(kvm, pud);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200183 kvm_tlb_flush_vmid_ipa(kvm, addr);
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100184 stage2_pmd_free(kvm, pmd_table);
Marc Zyngier4f728272013-04-12 19:12:05 +0100185 put_page(virt_to_page(pud));
186}
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500187
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000188static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
Marc Zyngier4f728272013-04-12 19:12:05 +0100189{
Christoffer Dall4f853a72014-05-09 23:31:31 +0200190 pte_t *pte_table = pte_offset_kernel(pmd, 0);
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +0000191 VM_BUG_ON(pmd_thp_or_huge(*pmd));
Christoffer Dall4f853a72014-05-09 23:31:31 +0200192 pmd_clear(pmd);
193 kvm_tlb_flush_vmid_ipa(kvm, addr);
194 pte_free_kernel(NULL, pte_table);
Marc Zyngier4f728272013-04-12 19:12:05 +0100195 put_page(virt_to_page(pmd));
196}
197
Marc Zyngier88dc25e82018-05-25 12:23:11 +0100198static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte)
199{
200 WRITE_ONCE(*ptep, new_pte);
201 dsb(ishst);
202}
203
204static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd)
205{
206 WRITE_ONCE(*pmdp, new_pmd);
207 dsb(ishst);
208}
209
Marc Zyngier0db9dd82018-06-27 15:51:05 +0100210static inline void kvm_pmd_populate(pmd_t *pmdp, pte_t *ptep)
211{
212 kvm_set_pmd(pmdp, kvm_mk_pmd(ptep));
213}
214
215static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
216{
217 WRITE_ONCE(*pudp, kvm_mk_pud(pmdp));
218 dsb(ishst);
219}
220
221static inline void kvm_pgd_populate(pgd_t *pgdp, pud_t *pudp)
222{
223 WRITE_ONCE(*pgdp, kvm_mk_pgd(pudp));
224 dsb(ishst);
225}
226
Marc Zyngier363ef892014-12-19 16:48:06 +0000227/*
228 * Unmapping vs dcache management:
229 *
230 * If a guest maps certain memory pages as uncached, all writes will
231 * bypass the data cache and go directly to RAM. However, the CPUs
232 * can still speculate reads (not writes) and fill cache lines with
233 * data.
234 *
235 * Those cache lines will be *clean* cache lines though, so a
236 * clean+invalidate operation is equivalent to an invalidate
237 * operation, because no cache lines are marked dirty.
238 *
239 * Those clean cache lines could be filled prior to an uncached write
240 * by the guest, and the cache coherent IO subsystem would therefore
241 * end up writing old data to disk.
242 *
243 * This is why right after unmapping a page/section and invalidating
244 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
245 * the IO subsystem will never hit in the cache.
Marc Zyngiere48d53a2018-04-06 12:27:28 +0100246 *
247 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
248 * we then fully enforce cacheability of RAM, no matter what the guest
249 * does.
Marc Zyngier363ef892014-12-19 16:48:06 +0000250 */
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000251static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
Christoffer Dall4f853a72014-05-09 23:31:31 +0200252 phys_addr_t addr, phys_addr_t end)
Marc Zyngier4f728272013-04-12 19:12:05 +0100253{
Christoffer Dall4f853a72014-05-09 23:31:31 +0200254 phys_addr_t start_addr = addr;
255 pte_t *pte, *start_pte;
256
257 start_pte = pte = pte_offset_kernel(pmd, addr);
258 do {
259 if (!pte_none(*pte)) {
Marc Zyngier363ef892014-12-19 16:48:06 +0000260 pte_t old_pte = *pte;
261
Christoffer Dall4f853a72014-05-09 23:31:31 +0200262 kvm_set_pte(pte, __pte(0));
Christoffer Dall4f853a72014-05-09 23:31:31 +0200263 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngier363ef892014-12-19 16:48:06 +0000264
265 /* No need to invalidate the cache for device mappings */
Ard Biesheuvel0de58f82015-12-03 09:25:22 +0100266 if (!kvm_is_device_pfn(pte_pfn(old_pte)))
Marc Zyngier363ef892014-12-19 16:48:06 +0000267 kvm_flush_dcache_pte(old_pte);
268
269 put_page(virt_to_page(pte));
Christoffer Dall4f853a72014-05-09 23:31:31 +0200270 }
271 } while (pte++, addr += PAGE_SIZE, addr != end);
272
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100273 if (stage2_pte_table_empty(kvm, start_pte))
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000274 clear_stage2_pmd_entry(kvm, pmd, start_addr);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500275}
276
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000277static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
Christoffer Dall4f853a72014-05-09 23:31:31 +0200278 phys_addr_t addr, phys_addr_t end)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500279{
Christoffer Dall4f853a72014-05-09 23:31:31 +0200280 phys_addr_t next, start_addr = addr;
281 pmd_t *pmd, *start_pmd;
Marc Zyngier000d3992013-03-05 02:43:17 +0000282
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100283 start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200284 do {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100285 next = stage2_pmd_addr_end(kvm, addr, end);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200286 if (!pmd_none(*pmd)) {
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +0000287 if (pmd_thp_or_huge(*pmd)) {
Marc Zyngier363ef892014-12-19 16:48:06 +0000288 pmd_t old_pmd = *pmd;
289
Christoffer Dall4f853a72014-05-09 23:31:31 +0200290 pmd_clear(pmd);
291 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngier363ef892014-12-19 16:48:06 +0000292
293 kvm_flush_dcache_pmd(old_pmd);
294
Christoffer Dall4f853a72014-05-09 23:31:31 +0200295 put_page(virt_to_page(pmd));
296 } else {
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000297 unmap_stage2_ptes(kvm, pmd, addr, next);
Marc Zyngier4f728272013-04-12 19:12:05 +0100298 }
299 }
Christoffer Dall4f853a72014-05-09 23:31:31 +0200300 } while (pmd++, addr = next, addr != end);
Marc Zyngier4f728272013-04-12 19:12:05 +0100301
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100302 if (stage2_pmd_table_empty(kvm, start_pmd))
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000303 clear_stage2_pud_entry(kvm, pud, start_addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200304}
305
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000306static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
Christoffer Dall4f853a72014-05-09 23:31:31 +0200307 phys_addr_t addr, phys_addr_t end)
308{
309 phys_addr_t next, start_addr = addr;
310 pud_t *pud, *start_pud;
311
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100312 start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200313 do {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100314 next = stage2_pud_addr_end(kvm, addr, end);
315 if (!stage2_pud_none(kvm, *pud)) {
316 if (stage2_pud_huge(kvm, *pud)) {
Marc Zyngier363ef892014-12-19 16:48:06 +0000317 pud_t old_pud = *pud;
318
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100319 stage2_pud_clear(kvm, pud);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200320 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngier363ef892014-12-19 16:48:06 +0000321 kvm_flush_dcache_pud(old_pud);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200322 put_page(virt_to_page(pud));
323 } else {
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000324 unmap_stage2_pmds(kvm, pud, addr, next);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200325 }
326 }
327 } while (pud++, addr = next, addr != end);
328
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100329 if (stage2_pud_table_empty(kvm, start_pud))
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000330 clear_stage2_pgd_entry(kvm, pgd, start_addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200331}
332
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000333/**
334 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
335 * @kvm: The VM pointer
336 * @start: The intermediate physical base address of the range to unmap
337 * @size: The size of the area to unmap
338 *
339 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
340 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
341 * destroying the VM), otherwise another faulting VCPU may come in and mess
342 * with things behind our backs.
343 */
344static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
Christoffer Dall4f853a72014-05-09 23:31:31 +0200345{
346 pgd_t *pgd;
347 phys_addr_t addr = start, end = start + size;
348 phys_addr_t next;
349
Suzuki K Poulose8b3405e2017-04-03 15:12:43 +0100350 assert_spin_locked(&kvm->mmu_lock);
Jia He47a91b72018-05-21 11:05:30 +0800351 WARN_ON(size & ~PAGE_MASK);
352
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100353 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200354 do {
Suzuki K Poulose0c428a6a2017-05-16 10:34:55 +0100355 /*
356 * Make sure the page table is still active, as another thread
 357 * could have possibly freed the page table while we released
358 * the lock.
359 */
360 if (!READ_ONCE(kvm->arch.pgd))
361 break;
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100362 next = stage2_pgd_addr_end(kvm, addr, end);
363 if (!stage2_pgd_none(kvm, *pgd))
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000364 unmap_stage2_puds(kvm, pgd, addr, next);
Suzuki K Poulose8b3405e2017-04-03 15:12:43 +0100365 /*
366 * If the range is too large, release the kvm->mmu_lock
367 * to prevent starvation and lockup detector warnings.
368 */
369 if (next != end)
370 cond_resched_lock(&kvm->mmu_lock);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200371 } while (pgd++, addr = next, addr != end);
Marc Zyngier000d3992013-03-05 02:43:17 +0000372}
373
Marc Zyngier9d218a12014-01-15 12:50:23 +0000374static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
375 phys_addr_t addr, phys_addr_t end)
376{
377 pte_t *pte;
378
379 pte = pte_offset_kernel(pmd, addr);
380 do {
Ard Biesheuvel0de58f82015-12-03 09:25:22 +0100381 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
Marc Zyngier363ef892014-12-19 16:48:06 +0000382 kvm_flush_dcache_pte(*pte);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000383 } while (pte++, addr += PAGE_SIZE, addr != end);
384}
385
386static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
387 phys_addr_t addr, phys_addr_t end)
388{
389 pmd_t *pmd;
390 phys_addr_t next;
391
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100392 pmd = stage2_pmd_offset(kvm, pud, addr);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000393 do {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100394 next = stage2_pmd_addr_end(kvm, addr, end);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000395 if (!pmd_none(*pmd)) {
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +0000396 if (pmd_thp_or_huge(*pmd))
Marc Zyngier363ef892014-12-19 16:48:06 +0000397 kvm_flush_dcache_pmd(*pmd);
398 else
Marc Zyngier9d218a12014-01-15 12:50:23 +0000399 stage2_flush_ptes(kvm, pmd, addr, next);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000400 }
401 } while (pmd++, addr = next, addr != end);
402}
403
404static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
405 phys_addr_t addr, phys_addr_t end)
406{
407 pud_t *pud;
408 phys_addr_t next;
409
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100410 pud = stage2_pud_offset(kvm, pgd, addr);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000411 do {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100412 next = stage2_pud_addr_end(kvm, addr, end);
413 if (!stage2_pud_none(kvm, *pud)) {
414 if (stage2_pud_huge(kvm, *pud))
Marc Zyngier363ef892014-12-19 16:48:06 +0000415 kvm_flush_dcache_pud(*pud);
416 else
Marc Zyngier9d218a12014-01-15 12:50:23 +0000417 stage2_flush_pmds(kvm, pud, addr, next);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000418 }
419 } while (pud++, addr = next, addr != end);
420}
421
422static void stage2_flush_memslot(struct kvm *kvm,
423 struct kvm_memory_slot *memslot)
424{
425 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
426 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
427 phys_addr_t next;
428 pgd_t *pgd;
429
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100430 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000431 do {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100432 next = stage2_pgd_addr_end(kvm, addr, end);
433 if (!stage2_pgd_none(kvm, *pgd))
Suzuki K Poulosed2db7772018-09-26 17:32:37 +0100434 stage2_flush_puds(kvm, pgd, addr, next);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000435 } while (pgd++, addr = next, addr != end);
436}
437
438/**
439 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
440 * @kvm: The struct kvm pointer
441 *
442 * Go through the stage 2 page tables and invalidate any cache lines
443 * backing memory already mapped to the VM.
444 */
Marc Zyngier3c1e7162014-12-19 16:05:31 +0000445static void stage2_flush_vm(struct kvm *kvm)
Marc Zyngier9d218a12014-01-15 12:50:23 +0000446{
447 struct kvm_memslots *slots;
448 struct kvm_memory_slot *memslot;
449 int idx;
450
451 idx = srcu_read_lock(&kvm->srcu);
452 spin_lock(&kvm->mmu_lock);
453
454 slots = kvm_memslots(kvm);
455 kvm_for_each_memslot(memslot, slots)
456 stage2_flush_memslot(kvm, memslot);
457
458 spin_unlock(&kvm->mmu_lock);
459 srcu_read_unlock(&kvm->srcu, idx);
460}
461
Suzuki K Poulose64f32492016-03-22 18:56:21 +0000462static void clear_hyp_pgd_entry(pgd_t *pgd)
463{
464 pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
465 pgd_clear(pgd);
466 pud_free(NULL, pud_table);
467 put_page(virt_to_page(pgd));
468}
469
470static void clear_hyp_pud_entry(pud_t *pud)
471{
472 pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
473 VM_BUG_ON(pud_huge(*pud));
474 pud_clear(pud);
475 pmd_free(NULL, pmd_table);
476 put_page(virt_to_page(pud));
477}
478
479static void clear_hyp_pmd_entry(pmd_t *pmd)
480{
481 pte_t *pte_table = pte_offset_kernel(pmd, 0);
482 VM_BUG_ON(pmd_thp_or_huge(*pmd));
483 pmd_clear(pmd);
484 pte_free_kernel(NULL, pte_table);
485 put_page(virt_to_page(pmd));
486}
487
488static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
489{
490 pte_t *pte, *start_pte;
491
492 start_pte = pte = pte_offset_kernel(pmd, addr);
493 do {
494 if (!pte_none(*pte)) {
495 kvm_set_pte(pte, __pte(0));
496 put_page(virt_to_page(pte));
497 }
498 } while (pte++, addr += PAGE_SIZE, addr != end);
499
500 if (hyp_pte_table_empty(start_pte))
501 clear_hyp_pmd_entry(pmd);
502}
503
504static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
505{
506 phys_addr_t next;
507 pmd_t *pmd, *start_pmd;
508
509 start_pmd = pmd = pmd_offset(pud, addr);
510 do {
511 next = pmd_addr_end(addr, end);
512 /* Hyp doesn't use huge pmds */
513 if (!pmd_none(*pmd))
514 unmap_hyp_ptes(pmd, addr, next);
515 } while (pmd++, addr = next, addr != end);
516
517 if (hyp_pmd_table_empty(start_pmd))
518 clear_hyp_pud_entry(pud);
519}
520
521static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
522{
523 phys_addr_t next;
524 pud_t *pud, *start_pud;
525
526 start_pud = pud = pud_offset(pgd, addr);
527 do {
528 next = pud_addr_end(addr, end);
529 /* Hyp doesn't use huge puds */
530 if (!pud_none(*pud))
531 unmap_hyp_pmds(pud, addr, next);
532 } while (pud++, addr = next, addr != end);
533
534 if (hyp_pud_table_empty(start_pud))
535 clear_hyp_pgd_entry(pgd);
536}
537
Marc Zyngier3ddd4552018-03-14 15:17:33 +0000538static unsigned int kvm_pgd_index(unsigned long addr, unsigned int ptrs_per_pgd)
539{
540 return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
541}
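/*
 * Worked example for the index calculation above, assuming (purely for
 * illustration) a 4K page configuration where PGDIR_SHIFT == 30 and
 * ptrs_per_pgd == 512, i.e. each pgd entry covers 1GB:
 *
 *	addr = 0x40000000:  (0x40000000 >> 30) & 511 = 1  ->  pgd slot 1
 *	addr = 0x80000000:  (0x80000000 >> 30) & 511 = 2  ->  pgd slot 2
 *
 * The idmap variant below simply passes __kvm_idmap_ptrs_per_pgd() instead
 * of PTRS_PER_PGD, so the same calculation covers the extended idmap case.
 */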
542
543static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd,
544 phys_addr_t start, u64 size)
Suzuki K Poulose64f32492016-03-22 18:56:21 +0000545{
546 pgd_t *pgd;
547 phys_addr_t addr = start, end = start + size;
548 phys_addr_t next;
549
550 /*
551 * We don't unmap anything from HYP, except at the hyp tear down.
552 * Hence, we don't have to invalidate the TLBs here.
553 */
Marc Zyngier3ddd4552018-03-14 15:17:33 +0000554 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
Suzuki K Poulose64f32492016-03-22 18:56:21 +0000555 do {
556 next = pgd_addr_end(addr, end);
557 if (!pgd_none(*pgd))
558 unmap_hyp_puds(pgd, addr, next);
559 } while (pgd++, addr = next, addr != end);
560}
561
Marc Zyngier3ddd4552018-03-14 15:17:33 +0000562static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
563{
564 __unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size);
565}
566
567static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)
568{
569 __unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size);
570}
571
Marc Zyngier000d3992013-03-05 02:43:17 +0000572/**
Marc Zyngier4f728272013-04-12 19:12:05 +0100573 * free_hyp_pgds - free Hyp-mode page tables
Marc Zyngier000d3992013-03-05 02:43:17 +0000574 *
Marc Zyngier5a677ce2013-04-12 19:12:06 +0100575 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
576 * therefore contains either mappings in the kernel memory area (above
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000577 * PAGE_OFFSET), or device mappings in the idmap range.
Marc Zyngier5a677ce2013-04-12 19:12:06 +0100578 *
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000579 * boot_hyp_pgd should only map the idmap range, and is only used in
580 * the extended idmap case.
Marc Zyngier000d3992013-03-05 02:43:17 +0000581 */
Marc Zyngier4f728272013-04-12 19:12:05 +0100582void free_hyp_pgds(void)
Marc Zyngier000d3992013-03-05 02:43:17 +0000583{
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000584 pgd_t *id_pgd;
585
Marc Zyngierd157f4a2013-04-12 19:12:07 +0100586 mutex_lock(&kvm_hyp_pgd_mutex);
Marc Zyngier5a677ce2013-04-12 19:12:06 +0100587
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000588 id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd;
589
590 if (id_pgd) {
591 /* In case we never called hyp_mmu_init() */
592 if (!io_map_base)
593 io_map_base = hyp_idmap_start;
594 unmap_hyp_idmap_range(id_pgd, io_map_base,
595 hyp_idmap_start + PAGE_SIZE - io_map_base);
596 }
597
Marc Zyngier26781f9c2016-06-30 18:40:46 +0100598 if (boot_hyp_pgd) {
Marc Zyngier26781f9c2016-06-30 18:40:46 +0100599 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
600 boot_hyp_pgd = NULL;
601 }
602
Marc Zyngier4f728272013-04-12 19:12:05 +0100603 if (hyp_pgd) {
Marc Zyngier7839c672017-12-07 11:45:45 +0000604 unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
605 (uintptr_t)high_memory - PAGE_OFFSET);
Marc Zyngierd4cb9df52013-05-14 12:11:34 +0100606
Christoffer Dall38f791a2014-10-10 12:14:28 +0200607 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
Marc Zyngierd157f4a2013-04-12 19:12:07 +0100608 hyp_pgd = NULL;
Marc Zyngier4f728272013-04-12 19:12:05 +0100609 }
Ard Biesheuvele4c5a682015-03-19 16:42:28 +0000610 if (merged_hyp_pgd) {
611 clear_page(merged_hyp_pgd);
612 free_page((unsigned long)merged_hyp_pgd);
613 merged_hyp_pgd = NULL;
614 }
Marc Zyngier4f728272013-04-12 19:12:05 +0100615
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500616 mutex_unlock(&kvm_hyp_pgd_mutex);
617}
618
619static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
Marc Zyngier6060df82013-04-12 19:12:01 +0100620 unsigned long end, unsigned long pfn,
621 pgprot_t prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500622{
623 pte_t *pte;
624 unsigned long addr;
625
Marc Zyngier3562c762013-04-12 19:12:02 +0100626 addr = start;
627 do {
Marc Zyngier6060df82013-04-12 19:12:01 +0100628 pte = pte_offset_kernel(pmd, addr);
Punit Agrawalf8df7332018-12-11 17:10:36 +0000629 kvm_set_pte(pte, kvm_pfn_pte(pfn, prot));
Marc Zyngier4f728272013-04-12 19:12:05 +0100630 get_page(virt_to_page(pte));
Marc Zyngier6060df82013-04-12 19:12:01 +0100631 pfn++;
Marc Zyngier3562c762013-04-12 19:12:02 +0100632 } while (addr += PAGE_SIZE, addr != end);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500633}
634
635static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
Marc Zyngier6060df82013-04-12 19:12:01 +0100636 unsigned long end, unsigned long pfn,
637 pgprot_t prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500638{
639 pmd_t *pmd;
640 pte_t *pte;
641 unsigned long addr, next;
642
Marc Zyngier3562c762013-04-12 19:12:02 +0100643 addr = start;
644 do {
Marc Zyngier6060df82013-04-12 19:12:01 +0100645 pmd = pmd_offset(pud, addr);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500646
647 BUG_ON(pmd_sect(*pmd));
648
649 if (pmd_none(*pmd)) {
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -0800650 pte = pte_alloc_one_kernel(NULL);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500651 if (!pte) {
652 kvm_err("Cannot allocate Hyp pte\n");
653 return -ENOMEM;
654 }
Marc Zyngier0db9dd82018-06-27 15:51:05 +0100655 kvm_pmd_populate(pmd, pte);
Marc Zyngier4f728272013-04-12 19:12:05 +0100656 get_page(virt_to_page(pmd));
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500657 }
658
659 next = pmd_addr_end(addr, end);
660
Marc Zyngier6060df82013-04-12 19:12:01 +0100661 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
662 pfn += (next - addr) >> PAGE_SHIFT;
Marc Zyngier3562c762013-04-12 19:12:02 +0100663 } while (addr = next, addr != end);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500664
665 return 0;
666}
667
Christoffer Dall38f791a2014-10-10 12:14:28 +0200668static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
669 unsigned long end, unsigned long pfn,
670 pgprot_t prot)
671{
672 pud_t *pud;
673 pmd_t *pmd;
674 unsigned long addr, next;
675 int ret;
676
677 addr = start;
678 do {
679 pud = pud_offset(pgd, addr);
680
681 if (pud_none_or_clear_bad(pud)) {
682 pmd = pmd_alloc_one(NULL, addr);
683 if (!pmd) {
684 kvm_err("Cannot allocate Hyp pmd\n");
685 return -ENOMEM;
686 }
Marc Zyngier0db9dd82018-06-27 15:51:05 +0100687 kvm_pud_populate(pud, pmd);
Christoffer Dall38f791a2014-10-10 12:14:28 +0200688 get_page(virt_to_page(pud));
Christoffer Dall38f791a2014-10-10 12:14:28 +0200689 }
690
691 next = pud_addr_end(addr, end);
692 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
693 if (ret)
694 return ret;
695 pfn += (next - addr) >> PAGE_SHIFT;
696 } while (addr = next, addr != end);
697
698 return 0;
699}
700
Kristina Martsenko98732d12018-01-15 15:23:49 +0000701static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
Marc Zyngier6060df82013-04-12 19:12:01 +0100702 unsigned long start, unsigned long end,
703 unsigned long pfn, pgprot_t prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500704{
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500705 pgd_t *pgd;
706 pud_t *pud;
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500707 unsigned long addr, next;
708 int err = 0;
709
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500710 mutex_lock(&kvm_hyp_pgd_mutex);
Marc Zyngier3562c762013-04-12 19:12:02 +0100711 addr = start & PAGE_MASK;
712 end = PAGE_ALIGN(end);
713 do {
Marc Zyngier3ddd4552018-03-14 15:17:33 +0000714 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500715
Christoffer Dall38f791a2014-10-10 12:14:28 +0200716 if (pgd_none(*pgd)) {
717 pud = pud_alloc_one(NULL, addr);
718 if (!pud) {
719 kvm_err("Cannot allocate Hyp pud\n");
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500720 err = -ENOMEM;
721 goto out;
722 }
Marc Zyngier0db9dd82018-06-27 15:51:05 +0100723 kvm_pgd_populate(pgd, pud);
Christoffer Dall38f791a2014-10-10 12:14:28 +0200724 get_page(virt_to_page(pgd));
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500725 }
726
727 next = pgd_addr_end(addr, end);
Christoffer Dall38f791a2014-10-10 12:14:28 +0200728 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500729 if (err)
730 goto out;
Marc Zyngier6060df82013-04-12 19:12:01 +0100731 pfn += (next - addr) >> PAGE_SHIFT;
Marc Zyngier3562c762013-04-12 19:12:02 +0100732 } while (addr = next, addr != end);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500733out:
734 mutex_unlock(&kvm_hyp_pgd_mutex);
735 return err;
736}
737
Christoffer Dall40c27292013-11-15 13:14:12 -0800738static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
739{
740 if (!is_vmalloc_addr(kaddr)) {
741 BUG_ON(!virt_addr_valid(kaddr));
742 return __pa(kaddr);
743 } else {
744 return page_to_phys(vmalloc_to_page(kaddr)) +
745 offset_in_page(kaddr);
746 }
747}
748
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500749/**
Marc Zyngier06e8c3b2012-10-28 01:09:14 +0100750 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500751 * @from: The virtual kernel start address of the range
752 * @to: The virtual kernel end address of the range (exclusive)
Marc Zyngierc8dddec2016-06-13 15:00:45 +0100753 * @prot: The protection to be applied to this range
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500754 *
Marc Zyngier06e8c3b2012-10-28 01:09:14 +0100755 * The same virtual address as the kernel virtual address is also used
756 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
757 * physical pages.
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500758 */
Marc Zyngierc8dddec2016-06-13 15:00:45 +0100759int create_hyp_mappings(void *from, void *to, pgprot_t prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500760{
Christoffer Dall40c27292013-11-15 13:14:12 -0800761 phys_addr_t phys_addr;
762 unsigned long virt_addr;
Marc Zyngier6c41a412016-06-30 18:40:51 +0100763 unsigned long start = kern_hyp_va((unsigned long)from);
764 unsigned long end = kern_hyp_va((unsigned long)to);
Marc Zyngier6060df82013-04-12 19:12:01 +0100765
Marc Zyngier1e947ba2015-01-29 11:59:54 +0000766 if (is_kernel_in_hyp_mode())
767 return 0;
768
Christoffer Dall40c27292013-11-15 13:14:12 -0800769 start = start & PAGE_MASK;
770 end = PAGE_ALIGN(end);
Marc Zyngier6060df82013-04-12 19:12:01 +0100771
Christoffer Dall40c27292013-11-15 13:14:12 -0800772 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
773 int err;
774
775 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
Kristina Martsenko98732d12018-01-15 15:23:49 +0000776 err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
777 virt_addr, virt_addr + PAGE_SIZE,
Christoffer Dall40c27292013-11-15 13:14:12 -0800778 __phys_to_pfn(phys_addr),
Marc Zyngierc8dddec2016-06-13 15:00:45 +0100779 prot);
Christoffer Dall40c27292013-11-15 13:14:12 -0800780 if (err)
781 return err;
782 }
783
784 return 0;
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500785}
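/*
 * A usage sketch for the function above (the "obj" pointer is illustrative
 * only): a caller that needs a kernel object visible at EL2 maps the kernel
 * VA range covering it, and the same address (modulo the offset applied by
 * kern_hyp_va()) then resolves to the same physical pages in Hyp mode.
 *
 *	err = create_hyp_mappings(obj, obj + 1, PAGE_HYP);
 *	if (err)
 *		return err;
 *
 * Read-only data can be mapped with PAGE_HYP_RO instead, and executable
 * text goes through create_hyp_exec_mappings() further down in this file.
 */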
786
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000787static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
788 unsigned long *haddr, pgprot_t prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500789{
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000790 pgd_t *pgd = hyp_pgd;
791 unsigned long base;
792 int ret = 0;
Marc Zyngier6060df82013-04-12 19:12:01 +0100793
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000794 mutex_lock(&kvm_hyp_pgd_mutex);
Marc Zyngier6060df82013-04-12 19:12:01 +0100795
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000796 /*
 797 * This assumes that we have enough space below the idmap
 798 * page to allocate our VAs. If not, the check below will
 799 * kick in. A potential alternative would be to detect that
800 * overflow and switch to an allocation above the idmap.
801 *
802 * The allocated size is always a multiple of PAGE_SIZE.
803 */
804 size = PAGE_ALIGN(size + offset_in_page(phys_addr));
805 base = io_map_base - size;
Marc Zyngier1bb32a42017-12-04 16:43:23 +0000806
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000807 /*
808 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
809 * allocating the new area, as it would indicate we've
810 * overflowed the idmap/IO address range.
811 */
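	/*
	 * For illustration, assuming VA_BITS == 48 and io_map_base currently
	 * at 0x0000800000001000 (just above BIT(47)): any request whose
	 * aligned size exceeds 0x1000 yields a base below BIT(47), bit 47
	 * then differs between base and io_map_base, the XOR below has
	 * BIT(47) set and the allocation is refused with -ENOMEM instead of
	 * silently wrapping into the rest of the address range.
	 */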
812 if ((base ^ io_map_base) & BIT(VA_BITS - 1))
813 ret = -ENOMEM;
814 else
815 io_map_base = base;
816
817 mutex_unlock(&kvm_hyp_pgd_mutex);
818
819 if (ret)
820 goto out;
821
822 if (__kvm_cpu_uses_extended_idmap())
823 pgd = boot_hyp_pgd;
824
825 ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
826 base, base + size,
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000827 __phys_to_pfn(phys_addr), prot);
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000828 if (ret)
829 goto out;
830
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000831 *haddr = base + offset_in_page(phys_addr);
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000832
833out:
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000834 return ret;
835}
836
837/**
838 * create_hyp_io_mappings - Map IO into both kernel and HYP
839 * @phys_addr: The physical start address which gets mapped
840 * @size: Size of the region being mapped
841 * @kaddr: Kernel VA for this mapping
842 * @haddr: HYP VA for this mapping
843 */
844int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
845 void __iomem **kaddr,
846 void __iomem **haddr)
847{
848 unsigned long addr;
849 int ret;
850
851 *kaddr = ioremap(phys_addr, size);
852 if (!*kaddr)
853 return -ENOMEM;
854
855 if (is_kernel_in_hyp_mode()) {
856 *haddr = *kaddr;
857 return 0;
858 }
859
860 ret = __create_hyp_private_mapping(phys_addr, size,
861 &addr, PAGE_HYP_DEVICE);
Marc Zyngier1bb32a42017-12-04 16:43:23 +0000862 if (ret) {
863 iounmap(*kaddr);
864 *kaddr = NULL;
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000865 *haddr = NULL;
Marc Zyngier1bb32a42017-12-04 16:43:23 +0000866 return ret;
867 }
868
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000869 *haddr = (void __iomem *)addr;
870 return 0;
871}
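/*
 * A usage sketch for the helper above (the resource and variable names are
 * illustrative only): a device region that must be reachable both from the
 * kernel and from the EL2 code gets a single call that ioremaps it and
 * installs the matching PAGE_HYP_DEVICE mapping in the hyp tables.
 *
 *	void __iomem *kbase, *hbase;
 *
 *	ret = create_hyp_io_mappings(res->start, resource_size(res),
 *				     &kbase, &hbase);
 *	if (ret)
 *		return ret;
 *
 * On VHE systems (is_kernel_in_hyp_mode()), hbase simply aliases kbase,
 * since there is no separate hyp address space to populate.
 */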
872
873/**
874 * create_hyp_exec_mappings - Map an executable range into HYP
875 * @phys_addr: The physical start address which gets mapped
876 * @size: Size of the region being mapped
877 * @haddr: HYP VA for this mapping
878 */
879int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
880 void **haddr)
881{
882 unsigned long addr;
883 int ret;
884
885 BUG_ON(is_kernel_in_hyp_mode());
886
887 ret = __create_hyp_private_mapping(phys_addr, size,
888 &addr, PAGE_HYP_EXEC);
889 if (ret) {
890 *haddr = NULL;
891 return ret;
892 }
893
894 *haddr = (void *)addr;
Marc Zyngier1bb32a42017-12-04 16:43:23 +0000895 return 0;
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500896}
897
Christoffer Dalld5d81842013-01-20 18:28:07 -0500898/**
899 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
900 * @kvm: The KVM struct pointer for the VM.
901 *
Vladimir Murzin9d4dc6882015-11-16 11:28:16 +0000902 * Allocates only the stage-2 HW PGD level table(s) (can support either full
903 * 40-bit input addresses or limited to 32-bit input addresses). Clears the
904 * allocated pages.
Christoffer Dalld5d81842013-01-20 18:28:07 -0500905 *
906 * Note we don't need locking here as this is only called when the VM is
907 * created, which can only be done once.
908 */
909int kvm_alloc_stage2_pgd(struct kvm *kvm)
910{
Christoffer Dalle329fb72018-12-11 15:26:31 +0100911 phys_addr_t pgd_phys;
Christoffer Dalld5d81842013-01-20 18:28:07 -0500912 pgd_t *pgd;
913
914 if (kvm->arch.pgd != NULL) {
915 kvm_err("kvm_arch already initialized?\n");
916 return -EINVAL;
917 }
918
Suzuki K Poulose9163ee232016-03-22 17:01:21 +0000919 /* Allocate the HW PGD, making sure that each page gets its own refcount */
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100920 pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
Suzuki K Poulose9163ee232016-03-22 17:01:21 +0000921 if (!pgd)
Marc Zyngiera9873702015-03-10 19:06:59 +0000922 return -ENOMEM;
923
Christoffer Dalle329fb72018-12-11 15:26:31 +0100924 pgd_phys = virt_to_phys(pgd);
925 if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
926 return -EINVAL;
927
Christoffer Dalld5d81842013-01-20 18:28:07 -0500928 kvm->arch.pgd = pgd;
Christoffer Dalle329fb72018-12-11 15:26:31 +0100929 kvm->arch.pgd_phys = pgd_phys;
Christoffer Dalld5d81842013-01-20 18:28:07 -0500930 return 0;
931}
932
Christoffer Dall957db102014-11-27 10:35:03 +0100933static void stage2_unmap_memslot(struct kvm *kvm,
934 struct kvm_memory_slot *memslot)
935{
936 hva_t hva = memslot->userspace_addr;
937 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
938 phys_addr_t size = PAGE_SIZE * memslot->npages;
939 hva_t reg_end = hva + size;
940
941 /*
942 * A memory region could potentially cover multiple VMAs, and any holes
943 * between them, so iterate over all of them to find out if we should
944 * unmap any of them.
945 *
946 * +--------------------------------------------+
947 * +---------------+----------------+ +----------------+
948 * | : VMA 1 | VMA 2 | | VMA 3 : |
949 * +---------------+----------------+ +----------------+
950 * | memory region |
951 * +--------------------------------------------+
952 */
953 do {
954 struct vm_area_struct *vma = find_vma(current->mm, hva);
955 hva_t vm_start, vm_end;
956
957 if (!vma || vma->vm_start >= reg_end)
958 break;
959
960 /*
961 * Take the intersection of this VMA with the memory region
962 */
963 vm_start = max(hva, vma->vm_start);
964 vm_end = min(reg_end, vma->vm_end);
965
966 if (!(vma->vm_flags & VM_PFNMAP)) {
967 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
968 unmap_stage2_range(kvm, gpa, vm_end - vm_start);
969 }
970 hva = vm_end;
971 } while (hva < reg_end);
972}
973
974/**
975 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
976 * @kvm: The struct kvm pointer
977 *
 978 * Go through the memregions and unmap any regular RAM
979 * backing memory already mapped to the VM.
980 */
981void stage2_unmap_vm(struct kvm *kvm)
982{
983 struct kvm_memslots *slots;
984 struct kvm_memory_slot *memslot;
985 int idx;
986
987 idx = srcu_read_lock(&kvm->srcu);
Marc Zyngier90f6e152017-03-16 18:20:49 +0000988 down_read(&current->mm->mmap_sem);
Christoffer Dall957db102014-11-27 10:35:03 +0100989 spin_lock(&kvm->mmu_lock);
990
991 slots = kvm_memslots(kvm);
992 kvm_for_each_memslot(memslot, slots)
993 stage2_unmap_memslot(kvm, memslot);
994
995 spin_unlock(&kvm->mmu_lock);
Marc Zyngier90f6e152017-03-16 18:20:49 +0000996 up_read(&current->mm->mmap_sem);
Christoffer Dall957db102014-11-27 10:35:03 +0100997 srcu_read_unlock(&kvm->srcu, idx);
998}
999
Christoffer Dalld5d81842013-01-20 18:28:07 -05001000/**
1001 * kvm_free_stage2_pgd - free all stage-2 tables
1002 * @kvm: The KVM struct pointer for the VM.
1003 *
1004 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
1005 * underlying level-2 and level-3 tables before freeing the actual level-1 table
1006 * and setting the struct pointer to NULL.
Christoffer Dalld5d81842013-01-20 18:28:07 -05001007 */
1008void kvm_free_stage2_pgd(struct kvm *kvm)
1009{
Suzuki K Poulose6c0d7062017-05-03 15:17:51 +01001010 void *pgd = NULL;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001011
Suzuki K Poulose8b3405e2017-04-03 15:12:43 +01001012 spin_lock(&kvm->mmu_lock);
Suzuki K Poulose6c0d7062017-05-03 15:17:51 +01001013 if (kvm->arch.pgd) {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001014 unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
Suzuki K Poulose2952a602017-05-16 10:34:54 +01001015 pgd = READ_ONCE(kvm->arch.pgd);
Suzuki K Poulose6c0d7062017-05-03 15:17:51 +01001016 kvm->arch.pgd = NULL;
Christoffer Dalle329fb72018-12-11 15:26:31 +01001017 kvm->arch.pgd_phys = 0;
Suzuki K Poulose6c0d7062017-05-03 15:17:51 +01001018 }
Suzuki K Poulose8b3405e2017-04-03 15:12:43 +01001019 spin_unlock(&kvm->mmu_lock);
1020
Suzuki K Poulose9163ee232016-03-22 17:01:21 +00001021 /* Free the HW pgd, one page at a time */
Suzuki K Poulose6c0d7062017-05-03 15:17:51 +01001022 if (pgd)
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001023 free_pages_exact(pgd, stage2_pgd_size(kvm));
Christoffer Dalld5d81842013-01-20 18:28:07 -05001024}
1025
Christoffer Dall38f791a2014-10-10 12:14:28 +02001026static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1027 phys_addr_t addr)
1028{
1029 pgd_t *pgd;
1030 pud_t *pud;
1031
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001032 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
1033 if (stage2_pgd_none(kvm, *pgd)) {
Christoffer Dall38f791a2014-10-10 12:14:28 +02001034 if (!cache)
1035 return NULL;
1036 pud = mmu_memory_cache_alloc(cache);
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001037 stage2_pgd_populate(kvm, pgd, pud);
Christoffer Dall38f791a2014-10-10 12:14:28 +02001038 get_page(virt_to_page(pgd));
1039 }
1040
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001041 return stage2_pud_offset(kvm, pgd, addr);
Christoffer Dall38f791a2014-10-10 12:14:28 +02001042}
1043
Christoffer Dallad361f02012-11-01 17:14:45 +01001044static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1045 phys_addr_t addr)
Christoffer Dalld5d81842013-01-20 18:28:07 -05001046{
Christoffer Dalld5d81842013-01-20 18:28:07 -05001047 pud_t *pud;
1048 pmd_t *pmd;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001049
Christoffer Dall38f791a2014-10-10 12:14:28 +02001050 pud = stage2_get_pud(kvm, cache, addr);
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001051 if (!pud || stage2_pud_huge(kvm, *pud))
Marc Zyngierd6dbdd32017-06-05 19:17:18 +01001052 return NULL;
1053
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001054 if (stage2_pud_none(kvm, *pud)) {
Christoffer Dalld5d81842013-01-20 18:28:07 -05001055 if (!cache)
Christoffer Dallad361f02012-11-01 17:14:45 +01001056 return NULL;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001057 pmd = mmu_memory_cache_alloc(cache);
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001058 stage2_pud_populate(kvm, pud, pmd);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001059 get_page(virt_to_page(pud));
Marc Zyngierc62ee2b2012-10-15 11:27:37 +01001060 }
1061
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001062 return stage2_pmd_offset(kvm, pud, addr);
Christoffer Dallad361f02012-11-01 17:14:45 +01001063}
Christoffer Dalld5d81842013-01-20 18:28:07 -05001064
Christoffer Dallad361f02012-11-01 17:14:45 +01001065static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
1066 *cache, phys_addr_t addr, const pmd_t *new_pmd)
1067{
1068 pmd_t *pmd, old_pmd;
1069
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001070retry:
Christoffer Dallad361f02012-11-01 17:14:45 +01001071 pmd = stage2_get_pmd(kvm, cache, addr);
1072 VM_BUG_ON(!pmd);
1073
Christoffer Dallad361f02012-11-01 17:14:45 +01001074 old_pmd = *pmd;
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001075 /*
 1076 * Multiple vcpus faulting on the same PMD entry can
1077 * lead to them sequentially updating the PMD with the
1078 * same value. Following the break-before-make
1079 * (pmd_clear() followed by tlb_flush()) process can
1080 * hinder forward progress due to refaults generated
1081 * on missing translations.
1082 *
1083 * Skip updating the page table if the entry is
1084 * unchanged.
1085 */
1086 if (pmd_val(old_pmd) == pmd_val(*new_pmd))
1087 return 0;
1088
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001089 if (pmd_present(old_pmd)) {
Punit Agrawal86658b82018-08-13 11:43:50 +01001090 /*
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001091 * If we already have PTE level mapping for this block,
1092 * we must unmap it to avoid inconsistent TLB state and
1093 * leaking the table page. We could end up in this situation
1094 * if the memory slot was marked for dirty logging and was
1095 * reverted, leaving PTE level mappings for the pages accessed
1096 * during the period. So, unmap the PTE level mapping for this
1097 * block and retry, as we could have released the upper level
1098 * table in the process.
Punit Agrawal86658b82018-08-13 11:43:50 +01001099 *
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001100 * Normal THP split/merge follows mmu_notifier callbacks and do
1101 * get handled accordingly.
Punit Agrawal86658b82018-08-13 11:43:50 +01001102 */
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001103 if (!pmd_thp_or_huge(old_pmd)) {
1104 unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
1105 goto retry;
1106 }
Punit Agrawal86658b82018-08-13 11:43:50 +01001107 /*
1108 * Mapping in huge pages should only happen through a
1109 * fault. If a page is merged into a transparent huge
1110 * page, the individual subpages of that huge page
1111 * should be unmapped through MMU notifiers before we
1112 * get here.
1113 *
1114 * Merging of CompoundPages is not supported; they
 1115 * should be split first, unmapped, merged,
 1116 * and mapped back in on demand.
1117 */
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001118 WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001119 pmd_clear(pmd);
Christoffer Dallad361f02012-11-01 17:14:45 +01001120 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001121 } else {
Christoffer Dallad361f02012-11-01 17:14:45 +01001122 get_page(virt_to_page(pmd));
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001123 }
1124
1125 kvm_set_pmd(pmd, *new_pmd);
Christoffer Dallad361f02012-11-01 17:14:45 +01001126 return 0;
1127}
1128
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001129static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1130 phys_addr_t addr, const pud_t *new_pudp)
1131{
1132 pud_t *pudp, old_pud;
1133
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001134retry:
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001135 pudp = stage2_get_pud(kvm, cache, addr);
1136 VM_BUG_ON(!pudp);
1137
1138 old_pud = *pudp;
1139
1140 /*
 1141 * A large number of vcpus faulting on the same stage 2 entry
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001142 * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
1143 * Skip updating the page tables if there is no change.
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001144 */
1145 if (pud_val(old_pud) == pud_val(*new_pudp))
1146 return 0;
1147
1148 if (stage2_pud_present(kvm, old_pud)) {
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001149 /*
1150 * If we already have table level mapping for this block, unmap
1151 * the range for this block and retry.
1152 */
1153 if (!stage2_pud_huge(kvm, old_pud)) {
1154 unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
1155 goto retry;
1156 }
1157
1158 WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001159 stage2_pud_clear(kvm, pudp);
1160 kvm_tlb_flush_vmid_ipa(kvm, addr);
1161 } else {
1162 get_page(virt_to_page(pudp));
1163 }
1164
1165 kvm_set_pud(pudp, *new_pudp);
1166 return 0;
1167}
1168
Punit Agrawal86d1c552018-12-11 17:10:38 +00001169/*
1170 * stage2_get_leaf_entry - walk the stage2 VM page tables and return
1171 * true if a valid and present leaf-entry is found. A pointer to the
1172 * leaf-entry is returned in the appropriate level variable - pudpp,
1173 * pmdpp, ptepp.
1174 */
1175static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
1176 pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp)
Marc Zyngier7a3796d2017-10-23 17:11:21 +01001177{
Punit Agrawal86d1c552018-12-11 17:10:38 +00001178 pud_t *pudp;
Marc Zyngier7a3796d2017-10-23 17:11:21 +01001179 pmd_t *pmdp;
1180 pte_t *ptep;
1181
Punit Agrawal86d1c552018-12-11 17:10:38 +00001182 *pudpp = NULL;
1183 *pmdpp = NULL;
1184 *ptepp = NULL;
1185
1186 pudp = stage2_get_pud(kvm, NULL, addr);
1187 if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp))
1188 return false;
1189
1190 if (stage2_pud_huge(kvm, *pudp)) {
1191 *pudpp = pudp;
1192 return true;
1193 }
1194
1195 pmdp = stage2_pmd_offset(kvm, pudp, addr);
Marc Zyngier7a3796d2017-10-23 17:11:21 +01001196 if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
1197 return false;
1198
Punit Agrawal86d1c552018-12-11 17:10:38 +00001199 if (pmd_thp_or_huge(*pmdp)) {
1200 *pmdpp = pmdp;
1201 return true;
1202 }
Marc Zyngier7a3796d2017-10-23 17:11:21 +01001203
1204 ptep = pte_offset_kernel(pmdp, addr);
1205 if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
1206 return false;
1207
Punit Agrawal86d1c552018-12-11 17:10:38 +00001208 *ptepp = ptep;
1209 return true;
1210}
1211
1212static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
1213{
1214 pud_t *pudp;
1215 pmd_t *pmdp;
1216 pte_t *ptep;
1217 bool found;
1218
1219 found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep);
1220 if (!found)
1221 return false;
1222
1223 if (pudp)
1224 return kvm_s2pud_exec(pudp);
1225 else if (pmdp)
1226 return kvm_s2pmd_exec(pmdp);
1227 else
1228 return kvm_s2pte_exec(ptep);
Marc Zyngier7a3796d2017-10-23 17:11:21 +01001229}
1230
Christoffer Dallad361f02012-11-01 17:14:45 +01001231static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
Mario Smarduch15a49a42015-01-15 15:58:58 -08001232 phys_addr_t addr, const pte_t *new_pte,
1233 unsigned long flags)
Christoffer Dallad361f02012-11-01 17:14:45 +01001234{
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001235 pud_t *pud;
Christoffer Dallad361f02012-11-01 17:14:45 +01001236 pmd_t *pmd;
1237 pte_t *pte, old_pte;
Mario Smarduch15a49a42015-01-15 15:58:58 -08001238 bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
1239 bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
1240
1241 VM_BUG_ON(logging_active && !cache);
Christoffer Dallad361f02012-11-01 17:14:45 +01001242
Christoffer Dall38f791a2014-10-10 12:14:28 +02001243 /* Create stage-2 page table mapping - Levels 0 and 1 */
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001244 pud = stage2_get_pud(kvm, cache, addr);
1245 if (!pud) {
1246 /*
1247 * Ignore calls from kvm_set_spte_hva for unallocated
1248 * address ranges.
1249 */
1250 return 0;
1251 }
1252
1253 /*
1254 * While dirty page logging - dissolve huge PUD, then continue
1255 * on to allocate page.
1256 */
1257 if (logging_active)
1258 stage2_dissolve_pud(kvm, addr, pud);
1259
1260 if (stage2_pud_none(kvm, *pud)) {
1261 if (!cache)
1262 return 0; /* ignore calls from kvm_set_spte_hva */
1263 pmd = mmu_memory_cache_alloc(cache);
1264 stage2_pud_populate(kvm, pud, pmd);
1265 get_page(virt_to_page(pud));
1266 }
1267
1268 pmd = stage2_pmd_offset(kvm, pud, addr);
Christoffer Dallad361f02012-11-01 17:14:45 +01001269 if (!pmd) {
1270 /*
1271 * Ignore calls from kvm_set_spte_hva for unallocated
1272 * address ranges.
1273 */
1274 return 0;
1275 }
1276
Mario Smarduch15a49a42015-01-15 15:58:58 -08001277 /*
1278 * While dirty page logging - dissolve huge PMD, then continue on to
1279 * allocate page.
1280 */
1281 if (logging_active)
1282 stage2_dissolve_pmd(kvm, addr, pmd);
1283
Christoffer Dallad361f02012-11-01 17:14:45 +01001284 /* Create stage-2 page mappings - Level 2 */
Christoffer Dalld5d81842013-01-20 18:28:07 -05001285 if (pmd_none(*pmd)) {
1286 if (!cache)
1287 return 0; /* ignore calls from kvm_set_spte_hva */
1288 pte = mmu_memory_cache_alloc(cache);
Marc Zyngier0db9dd82018-06-27 15:51:05 +01001289 kvm_pmd_populate(pmd, pte);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001290 get_page(virt_to_page(pmd));
Marc Zyngierc62ee2b2012-10-15 11:27:37 +01001291 }
1292
1293 pte = pte_offset_kernel(pmd, addr);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001294
1295 if (iomap && pte_present(*pte))
1296 return -EFAULT;
1297
1298 /* Create 2nd stage page table mapping - Level 3 */
1299 old_pte = *pte;
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001300 if (pte_present(old_pte)) {
Punit Agrawal976d34e2018-08-13 11:43:51 +01001301 /* Skip page table update if there is no change */
1302 if (pte_val(old_pte) == pte_val(*new_pte))
1303 return 0;
1304
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001305 kvm_set_pte(pte, __pte(0));
Marc Zyngier48762762013-01-28 15:27:00 +00001306 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001307 } else {
Christoffer Dalld5d81842013-01-20 18:28:07 -05001308 get_page(virt_to_page(pte));
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001309 }
Christoffer Dalld5d81842013-01-20 18:28:07 -05001310
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001311 kvm_set_pte(pte, *new_pte);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001312 return 0;
1313}
1314
Catalin Marinas06485052016-04-13 17:57:37 +01001315#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1316static int stage2_ptep_test_and_clear_young(pte_t *pte)
1317{
1318 if (pte_young(*pte)) {
1319 *pte = pte_mkold(*pte);
1320 return 1;
1321 }
1322 return 0;
1323}
1324#else
1325static int stage2_ptep_test_and_clear_young(pte_t *pte)
1326{
1327 return __ptep_test_and_clear_young(pte);
1328}
1329#endif
1330
1331static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
1332{
1333 return stage2_ptep_test_and_clear_young((pte_t *)pmd);
1334}
1335
Punit Agrawal35a63962018-12-11 17:10:40 +00001336static int stage2_pudp_test_and_clear_young(pud_t *pud)
1337{
1338 return stage2_ptep_test_and_clear_young((pte_t *)pud);
1339}
1340
Christoffer Dalld5d81842013-01-20 18:28:07 -05001341/**
1342 * kvm_phys_addr_ioremap - map a device range to guest IPA
1343 *
1344 * @kvm: The KVM pointer
1345 * @guest_ipa: The IPA at which to insert the mapping
1346 * @pa: The physical address of the device
1347 * @size: The size of the mapping
1348 */
1349int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
Ard Biesheuvelc40f2f82014-09-17 14:56:18 -07001350 phys_addr_t pa, unsigned long size, bool writable)
Christoffer Dalld5d81842013-01-20 18:28:07 -05001351{
1352 phys_addr_t addr, end;
1353 int ret = 0;
1354 unsigned long pfn;
1355 struct kvm_mmu_memory_cache cache = { 0, };
1356
1357 end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
1358 pfn = __phys_to_pfn(pa);
1359
1360 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
Punit Agrawalf8df7332018-12-11 17:10:36 +00001361 pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001362
Ard Biesheuvelc40f2f82014-09-17 14:56:18 -07001363 if (writable)
Catalin Marinas06485052016-04-13 17:57:37 +01001364 pte = kvm_s2pte_mkwrite(pte);
Ard Biesheuvelc40f2f82014-09-17 14:56:18 -07001365
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001366 ret = mmu_topup_memory_cache(&cache,
1367 kvm_mmu_cache_min_pages(kvm),
1368 KVM_NR_MEM_OBJS);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001369 if (ret)
1370 goto out;
1371 spin_lock(&kvm->mmu_lock);
Mario Smarduch15a49a42015-01-15 15:58:58 -08001372 ret = stage2_set_pte(kvm, &cache, addr, &pte,
1373 KVM_S2PTE_FLAG_IS_IOMAP);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001374 spin_unlock(&kvm->mmu_lock);
1375 if (ret)
1376 goto out;
1377
1378 pfn++;
1379 }
1380
1381out:
1382 mmu_free_memory_cache(&cache);
1383 return ret;
1384}
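/*
 * Usage sketch (illustrative only; the IPA, PA and size below are made-up
 * values, not taken from a real caller):
 *
 *	ret = kvm_phys_addr_ioremap(kvm, 0x08000000, 0x09000000,
 *				    SZ_64K, true);
 *
 * would map a 64kB device region read/write into the guest, one page at a
 * time, using PAGE_S2_DEVICE attributes.
 */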
1385
Dan Williamsba049e92016-01-15 16:56:11 -08001386static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001387{
Dan Williamsba049e92016-01-15 16:56:11 -08001388 kvm_pfn_t pfn = *pfnp;
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001389 gfn_t gfn = *ipap >> PAGE_SHIFT;
Punit Agrawalfd2ef352018-10-01 16:54:35 +01001390 struct page *page = pfn_to_page(pfn);
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001391
Punit Agrawalfd2ef352018-10-01 16:54:35 +01001392 /*
Christoffer Dall69921952018-11-06 13:33:38 +01001393 * PageTransCompoundMap() returns true for THP and
Punit Agrawalfd2ef352018-10-01 16:54:35 +01001394 * hugetlbfs. Make sure the adjustment is done only for THP
1395 * pages.
1396 */
1397 if (!PageHuge(page) && PageTransCompoundMap(page)) {
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001398 unsigned long mask;
1399 /*
1400 * The address we faulted on is backed by a transparent huge
1401 * page. However, because we map the compound huge page and
1402 * not the individual tail page, we need to transfer the
1403 * refcount to the head page. We have to be careful that the
1404 * THP doesn't start to split while we are adjusting the
1405 * refcounts.
1406 *
1407 * We are sure this doesn't happen, because mmu_notifier_retry
1408 * was successful and we are holding the mmu_lock, so if this
1409 * THP is trying to split, it will be blocked in the mmu
1410 * notifier before touching any of the pages, specifically
1411 * before being able to call __split_huge_page_refcount().
1412 *
1413 * We can therefore safely transfer the refcount from PG_tail
1414 * to PG_head and switch the pfn from a tail page to the head
1415 * page accordingly.
1416 */
1417 mask = PTRS_PER_PMD - 1;
1418 VM_BUG_ON((gfn & mask) != (pfn & mask));
1419 if (pfn & mask) {
1420 *ipap &= PMD_MASK;
1421 kvm_release_pfn_clean(pfn);
1422 pfn &= ~mask;
1423 kvm_get_pfn(pfn);
1424 *pfnp = pfn;
1425 }
1426
1427 return true;
1428 }
1429
1430 return false;
1431}
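/*
 * Worked example of the adjustment above, assuming 4K pages (so
 * PTRS_PER_PMD = 512 and mask = 0x1ff): a fault on pfn 0x12345 inside a
 * THP gives
 *
 *	pfn & ~mask  = 0x12200	(head page of the huge page)
 *	*ipap &= PMD_MASK	(IPA rounded down to the 2M boundary)
 *
 * so the stage-2 PMD maps the whole 2M region in one go.
 */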
1432
Mario Smarduchc6473552015-01-15 15:58:56 -08001433/**
1434 * stage2_wp_ptes - write protect PMD range
1435 * @pmd: pointer to pmd entry
1436 * @addr: range start address
1437 * @end: range end address
1438 */
1439static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
1440{
1441 pte_t *pte;
1442
1443 pte = pte_offset_kernel(pmd, addr);
1444 do {
1445 if (!pte_none(*pte)) {
1446 if (!kvm_s2pte_readonly(pte))
1447 kvm_set_s2pte_readonly(pte);
1448 }
1449 } while (pte++, addr += PAGE_SIZE, addr != end);
1450}
1451
1452/**
1453 * stage2_wp_pmds - write protect PUD range
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001454 * @kvm: kvm instance for the VM
Mario Smarduchc6473552015-01-15 15:58:56 -08001455 * @pud: pointer to pud entry
1456 * @addr: range start address
1457 * @end: range end address
1458 */
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001459static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
1460 phys_addr_t addr, phys_addr_t end)
Mario Smarduchc6473552015-01-15 15:58:56 -08001461{
1462 pmd_t *pmd;
1463 phys_addr_t next;
1464
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001465 pmd = stage2_pmd_offset(kvm, pud, addr);
Mario Smarduchc6473552015-01-15 15:58:56 -08001466
1467 do {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001468 next = stage2_pmd_addr_end(kvm, addr, end);
Mario Smarduchc6473552015-01-15 15:58:56 -08001469 if (!pmd_none(*pmd)) {
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +00001470 if (pmd_thp_or_huge(*pmd)) {
Mario Smarduchc6473552015-01-15 15:58:56 -08001471 if (!kvm_s2pmd_readonly(pmd))
1472 kvm_set_s2pmd_readonly(pmd);
1473 } else {
1474 stage2_wp_ptes(pmd, addr, next);
1475 }
1476 }
1477 } while (pmd++, addr = next, addr != end);
1478}
1479
1480/**
1481 * stage2_wp_puds - write protect PGD range
1482 * @pgd: pointer to pgd entry
1483 * @addr: range start address
1484 * @end: range end address
1485 *
 1486 * Process PUD entries; huge PUDs are write protected in place.
1487 */
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001488static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
1489 phys_addr_t addr, phys_addr_t end)
Mario Smarduchc6473552015-01-15 15:58:56 -08001490{
1491 pud_t *pud;
1492 phys_addr_t next;
1493
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001494 pud = stage2_pud_offset(kvm, pgd, addr);
Mario Smarduchc6473552015-01-15 15:58:56 -08001495 do {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001496 next = stage2_pud_addr_end(kvm, addr, end);
1497 if (!stage2_pud_none(kvm, *pud)) {
Punit Agrawal4ea5af52018-12-11 17:10:37 +00001498 if (stage2_pud_huge(kvm, *pud)) {
1499 if (!kvm_s2pud_readonly(pud))
1500 kvm_set_s2pud_readonly(pud);
1501 } else {
1502 stage2_wp_pmds(kvm, pud, addr, next);
1503 }
Mario Smarduchc6473552015-01-15 15:58:56 -08001504 }
1505 } while (pud++, addr = next, addr != end);
1506}
1507
1508/**
1509 * stage2_wp_range() - write protect stage2 memory region range
1510 * @kvm: The KVM pointer
1511 * @addr: Start address of range
1512 * @end: End address of range
1513 */
1514static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
1515{
1516 pgd_t *pgd;
1517 phys_addr_t next;
1518
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001519 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
Mario Smarduchc6473552015-01-15 15:58:56 -08001520 do {
1521 /*
1522 * Release kvm_mmu_lock periodically if the memory region is
1523 * large. Otherwise, we may see kernel panics with
Christoffer Dall227ea812015-01-23 10:49:31 +01001524 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
1525 * CONFIG_LOCKDEP. Additionally, holding the lock too long
Suzuki K Poulose0c428a6a2017-05-16 10:34:55 +01001526 * will also starve other vCPUs. We also have to make sure
 1527 * that the page tables are not freed while the lock is
 1528 * released.
Mario Smarduchc6473552015-01-15 15:58:56 -08001529 */
Suzuki K Poulose0c428a6a2017-05-16 10:34:55 +01001530 cond_resched_lock(&kvm->mmu_lock);
1531 if (!READ_ONCE(kvm->arch.pgd))
1532 break;
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001533 next = stage2_pgd_addr_end(kvm, addr, end);
1534 if (stage2_pgd_present(kvm, *pgd))
1535 stage2_wp_puds(kvm, pgd, addr, next);
Mario Smarduchc6473552015-01-15 15:58:56 -08001536 } while (pgd++, addr = next, addr != end);
1537}
1538
1539/**
1540 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
1541 * @kvm: The KVM pointer
1542 * @slot: The memory slot to write protect
1543 *
 1544 * Called to start logging dirty pages after the KVM_MEM_LOG_DIRTY_PAGES
 1545 * flag is set on a memory region. After this function returns
Punit Agrawal4ea5af52018-12-11 17:10:37 +00001546 * all present PUDs, PMDs and PTEs are write protected in the memory region.
Mario Smarduchc6473552015-01-15 15:58:56 -08001547 * Afterwards read of dirty page log can be called.
1548 *
1549 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
1550 * serializing operations for VM memory regions.
1551 */
1552void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1553{
Paolo Bonzini9f6b8022015-05-17 16:20:07 +02001554 struct kvm_memslots *slots = kvm_memslots(kvm);
1555 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
Mario Smarduchc6473552015-01-15 15:58:56 -08001556 phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
1557 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1558
1559 spin_lock(&kvm->mmu_lock);
1560 stage2_wp_range(kvm, start, end);
1561 spin_unlock(&kvm->mmu_lock);
1562 kvm_flush_remote_tlbs(kvm);
1563}
Mario Smarduch53c810c2015-01-15 15:58:57 -08001564
1565/**
Kai Huang3b0f1d02015-01-28 10:54:23 +08001566 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
Mario Smarduch53c810c2015-01-15 15:58:57 -08001567 * @kvm: The KVM pointer
1568 * @slot: The memory slot associated with mask
1569 * @gfn_offset: The gfn offset in memory slot
1570 * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
1571 * slot to be write protected
1572 *
 1573 * Walks the bits set in mask and write protects the associated PTEs. Caller must
1574 * acquire kvm_mmu_lock.
1575 */
Kai Huang3b0f1d02015-01-28 10:54:23 +08001576static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
Mario Smarduch53c810c2015-01-15 15:58:57 -08001577 struct kvm_memory_slot *slot,
1578 gfn_t gfn_offset, unsigned long mask)
1579{
1580 phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1581 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
1582 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1583
1584 stage2_wp_range(kvm, start, end);
1585}
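/*
 * Worked example (values are illustrative): with gfn_offset = 0 and
 * mask = 0x28 (bits 3 and 5 set), __ffs(mask) = 3 and __fls(mask) = 5, so
 *
 *	start = (base_gfn + 3) << PAGE_SHIFT
 *	end   = (base_gfn + 6) << PAGE_SHIFT
 *
 * and the pages at offsets 3, 4 and 5 get write protected. Page 4's mask
 * bit is clear, but write protecting it again is harmless for dirty
 * logging: a later write simply faults and marks it dirty.
 */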
Mario Smarduchc6473552015-01-15 15:58:56 -08001586
Kai Huang3b0f1d02015-01-28 10:54:23 +08001587/*
1588 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1589 * dirty pages.
1590 *
1591 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1592 * enable dirty logging for them.
1593 */
1594void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1595 struct kvm_memory_slot *slot,
1596 gfn_t gfn_offset, unsigned long mask)
1597{
1598 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1599}
1600
Marc Zyngier17ab9d52017-10-23 17:11:22 +01001601static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
Marc Zyngier0d3e4d42015-01-05 21:13:24 +00001602{
Marc Zyngier17ab9d52017-10-23 17:11:22 +01001603 __clean_dcache_guest_page(pfn, size);
Marc Zyngiera15f6932017-10-23 17:11:15 +01001604}
1605
Marc Zyngier17ab9d52017-10-23 17:11:22 +01001606static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
Marc Zyngiera15f6932017-10-23 17:11:15 +01001607{
Marc Zyngier17ab9d52017-10-23 17:11:22 +01001608 __invalidate_icache_guest_page(pfn, size);
Marc Zyngier0d3e4d42015-01-05 21:13:24 +00001609}
1610
James Morse196f8782017-06-20 17:11:48 +01001611static void kvm_send_hwpoison_signal(unsigned long address,
1612 struct vm_area_struct *vma)
1613{
Eric W. Biederman795a8372018-04-16 13:39:10 -05001614 short lsb;
James Morse196f8782017-06-20 17:11:48 +01001615
1616 if (is_vm_hugetlb_page(vma))
Eric W. Biederman795a8372018-04-16 13:39:10 -05001617 lsb = huge_page_shift(hstate_vma(vma));
James Morse196f8782017-06-20 17:11:48 +01001618 else
Eric W. Biederman795a8372018-04-16 13:39:10 -05001619 lsb = PAGE_SHIFT;
James Morse196f8782017-06-20 17:11:48 +01001620
Eric W. Biederman795a8372018-04-16 13:39:10 -05001621 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
James Morse196f8782017-06-20 17:11:48 +01001622}
1623
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001624static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
1625 unsigned long hva,
1626 unsigned long map_size)
Christoffer Dall6794ad52018-11-02 08:53:22 +01001627{
Shaokun Zhangc2be79a2019-02-19 17:22:21 +08001628 gpa_t gpa_start;
Christoffer Dall6794ad52018-11-02 08:53:22 +01001629 hva_t uaddr_start, uaddr_end;
1630 size_t size;
1631
1632 size = memslot->npages * PAGE_SIZE;
1633
1634 gpa_start = memslot->base_gfn << PAGE_SHIFT;
Christoffer Dall6794ad52018-11-02 08:53:22 +01001635
1636 uaddr_start = memslot->userspace_addr;
1637 uaddr_end = uaddr_start + size;
1638
1639 /*
1640 * Pages belonging to memslots that don't have the same alignment
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001641 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
1642 * PMD/PUD entries, because we'll end up mapping the wrong pages.
Christoffer Dall6794ad52018-11-02 08:53:22 +01001643 *
1644 * Consider a layout like the following:
1645 *
1646 * memslot->userspace_addr:
1647 * +-----+--------------------+--------------------+---+
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001648 * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz|
Christoffer Dall6794ad52018-11-02 08:53:22 +01001649 * +-----+--------------------+--------------------+---+
1650 *
1651 * memslot->base_gfn << PAGE_SIZE:
1652 * +---+--------------------+--------------------+-----+
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001653 * |abc|def Stage-2 block | Stage-2 block |tvxyz|
Christoffer Dall6794ad52018-11-02 08:53:22 +01001654 * +---+--------------------+--------------------+-----+
1655 *
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001656 * If we create those stage-2 blocks, we'll end up with this incorrect
Christoffer Dall6794ad52018-11-02 08:53:22 +01001657 * mapping:
1658 * d -> f
1659 * e -> g
1660 * f -> h
1661 */
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001662 if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
Christoffer Dall6794ad52018-11-02 08:53:22 +01001663 return false;
1664
1665 /*
1666 * Next, let's make sure we're not trying to map anything not covered
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001667 * by the memslot. This means we have to prohibit block size mappings
1668 * for the beginning and end of a non-block aligned and non-block sized
Christoffer Dall6794ad52018-11-02 08:53:22 +01001669 * memory slot (illustrated by the head and tail parts of the
1670 * userspace view above containing pages 'abcde' and 'xyz',
1671 * respectively).
1672 *
1673 * Note that it doesn't matter if we do the check using the
1674 * userspace_addr or the base_gfn, as both are equally aligned (per
1675 * the check above) and equally sized.
1676 */
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001677 return (hva & ~(map_size - 1)) >= uaddr_start &&
1678 (hva & ~(map_size - 1)) + map_size <= uaddr_end;
Christoffer Dall6794ad52018-11-02 08:53:22 +01001679}
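/*
 * A numeric example of the check above (all values are assumptions): with
 * map_size = 2M, a memslot whose base IPA is 0x80200000 and whose
 * userspace address is 0x7f2500200000 has matching offsets within the
 * block (both 0), so block mappings are allowed for hvas that sit wholly
 * inside the slot. If the userspace address were 0x7f2500310000 instead,
 * the offsets (0x110000 vs 0x0) would differ and we would fall back to
 * PAGE_SIZE mappings.
 */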
1680
Christoffer Dall94f8e642013-01-20 18:28:12 -05001681static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
Christoffer Dall98047882014-08-19 12:18:04 +02001682 struct kvm_memory_slot *memslot, unsigned long hva,
Christoffer Dall94f8e642013-01-20 18:28:12 -05001683 unsigned long fault_status)
1684{
Christoffer Dall94f8e642013-01-20 18:28:12 -05001685 int ret;
Punit Agrawal6396b852018-12-11 17:10:35 +00001686 bool write_fault, writable, force_pte = false;
1687 bool exec_fault, needs_exec;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001688 unsigned long mmu_seq;
Christoffer Dallad361f02012-11-01 17:14:45 +01001689 gfn_t gfn = fault_ipa >> PAGE_SHIFT;
Christoffer Dallad361f02012-11-01 17:14:45 +01001690 struct kvm *kvm = vcpu->kvm;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001691 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
Christoffer Dallad361f02012-11-01 17:14:45 +01001692 struct vm_area_struct *vma;
Dan Williamsba049e92016-01-15 16:56:11 -08001693 kvm_pfn_t pfn;
Kim Phillipsb8865762014-06-26 01:45:51 +01001694 pgprot_t mem_type = PAGE_S2;
Mario Smarduch15a49a42015-01-15 15:58:58 -08001695 bool logging_active = memslot_is_logging(memslot);
Punit Agrawal3f58bf62018-12-11 17:10:34 +00001696 unsigned long vma_pagesize, flags = 0;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001697
Ard Biesheuvela7d079c2014-09-09 11:27:09 +01001698 write_fault = kvm_is_write_fault(vcpu);
Marc Zyngierd0e22b42017-10-23 17:11:19 +01001699 exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
1700 VM_BUG_ON(write_fault && exec_fault);
1701
1702 if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
Christoffer Dall94f8e642013-01-20 18:28:12 -05001703 kvm_err("Unexpected L2 read permission error\n");
1704 return -EFAULT;
1705 }
1706
Christoffer Dallad361f02012-11-01 17:14:45 +01001707 /* Let's check if we will get back a huge page backed by hugetlbfs */
1708 down_read(&current->mm->mmap_sem);
1709 vma = find_vma_intersection(current->mm, hva, hva + 1);
Ard Biesheuvel37b54402014-09-17 14:56:17 -07001710 if (unlikely(!vma)) {
1711 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1712 up_read(&current->mm->mmap_sem);
1713 return -EFAULT;
1714 }
1715
Punit Agrawal3f58bf62018-12-11 17:10:34 +00001716 vma_pagesize = vma_kernel_pagesize(vma);
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001717 if (logging_active ||
1718 !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
1719 force_pte = true;
1720 vma_pagesize = PAGE_SIZE;
1721 }
1722
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001723 /*
Suzuki K Poulose280cebf2019-01-29 19:12:17 +00001724 * The stage2 has a minimum of 2 levels of page tables (for arm64 see
1725 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
1726 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
1727 * As for PUD huge maps, we must make sure that we have at least
1728 * 3 levels, i.e, PMD is not folded.
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001729 */
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001730 if (vma_pagesize == PMD_SIZE ||
1731 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001732 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
Christoffer Dallad361f02012-11-01 17:14:45 +01001733 up_read(&current->mm->mmap_sem);
1734
Christoffer Dall94f8e642013-01-20 18:28:12 -05001735 /* We need minimum second+third level pages */
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001736 ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
Christoffer Dall38f791a2014-10-10 12:14:28 +02001737 KVM_NR_MEM_OBJS);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001738 if (ret)
1739 return ret;
1740
1741 mmu_seq = vcpu->kvm->mmu_notifier_seq;
1742 /*
1743 * Ensure the read of mmu_notifier_seq happens before we call
1744 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
1745 * the page we just got a reference to gets unmapped before we have a
1746 * chance to grab the mmu_lock, which ensure that if the page gets
1747 * unmapped afterwards, the call to kvm_unmap_hva will take it away
1748 * from us again properly. This smp_rmb() interacts with the smp_wmb()
1749 * in kvm_mmu_notifier_invalidate_<page|range_end>.
1750 */
1751 smp_rmb();
1752
Christoffer Dallad361f02012-11-01 17:14:45 +01001753 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
James Morse196f8782017-06-20 17:11:48 +01001754 if (pfn == KVM_PFN_ERR_HWPOISON) {
1755 kvm_send_hwpoison_signal(hva, vma);
1756 return 0;
1757 }
Christoffer Dall9ac71592016-08-17 10:46:10 +02001758 if (is_error_noslot_pfn(pfn))
Christoffer Dall94f8e642013-01-20 18:28:12 -05001759 return -EFAULT;
1760
Mario Smarduch15a49a42015-01-15 15:58:58 -08001761 if (kvm_is_device_pfn(pfn)) {
Kim Phillipsb8865762014-06-26 01:45:51 +01001762 mem_type = PAGE_S2_DEVICE;
Mario Smarduch15a49a42015-01-15 15:58:58 -08001763 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
1764 } else if (logging_active) {
1765 /*
1766 * Faults on pages in a memslot with logging enabled
1767 * should not be mapped with huge pages (it introduces churn
1768 * and performance degradation), so force a pte mapping.
1769 */
Mario Smarduch15a49a42015-01-15 15:58:58 -08001770 flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
1771
1772 /*
1773 * Only actually map the page as writable if this was a write
1774 * fault.
1775 */
1776 if (!write_fault)
1777 writable = false;
1778 }
Kim Phillipsb8865762014-06-26 01:45:51 +01001779
Christoffer Dallad361f02012-11-01 17:14:45 +01001780 spin_lock(&kvm->mmu_lock);
1781 if (mmu_notifier_retry(kvm, mmu_seq))
Christoffer Dall94f8e642013-01-20 18:28:12 -05001782 goto out_unlock;
Mario Smarduch15a49a42015-01-15 15:58:58 -08001783
Punit Agrawal3f58bf62018-12-11 17:10:34 +00001784 if (vma_pagesize == PAGE_SIZE && !force_pte) {
1785 /*
 1786 * Only PMD_SIZE transparent hugepages (THP) are
1787 * currently supported. This code will need to be
1788 * updated to support other THP sizes.
1789 */
1790 if (transparent_hugepage_adjust(&pfn, &fault_ipa))
1791 vma_pagesize = PMD_SIZE;
1792 }
Christoffer Dallad361f02012-11-01 17:14:45 +01001793
Punit Agrawal3f58bf62018-12-11 17:10:34 +00001794 if (writable)
1795 kvm_set_pfn_dirty(pfn);
1796
1797 if (fault_status != FSC_PERM)
1798 clean_dcache_guest_page(pfn, vma_pagesize);
1799
1800 if (exec_fault)
1801 invalidate_icache_guest_page(pfn, vma_pagesize);
1802
Punit Agrawal6396b852018-12-11 17:10:35 +00001803 /*
1804 * If we took an execution fault we have made the
1805 * icache/dcache coherent above and should now let the s2
1806 * mapping be executable.
1807 *
1808 * Write faults (!exec_fault && FSC_PERM) are orthogonal to
1809 * execute permissions, and we preserve whatever we have.
1810 */
1811 needs_exec = exec_fault ||
1812 (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
1813
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001814 if (vma_pagesize == PUD_SIZE) {
1815 pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
1816
1817 new_pud = kvm_pud_mkhuge(new_pud);
1818 if (writable)
1819 new_pud = kvm_s2pud_mkwrite(new_pud);
1820
1821 if (needs_exec)
1822 new_pud = kvm_s2pud_mkexec(new_pud);
1823
1824 ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud);
1825 } else if (vma_pagesize == PMD_SIZE) {
Punit Agrawalf8df7332018-12-11 17:10:36 +00001826 pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
1827
1828 new_pmd = kvm_pmd_mkhuge(new_pmd);
1829
Punit Agrawal3f58bf62018-12-11 17:10:34 +00001830 if (writable)
Catalin Marinas06485052016-04-13 17:57:37 +01001831 new_pmd = kvm_s2pmd_mkwrite(new_pmd);
Marc Zyngierd0e22b42017-10-23 17:11:19 +01001832
Punit Agrawal6396b852018-12-11 17:10:35 +00001833 if (needs_exec)
Marc Zyngierd0e22b42017-10-23 17:11:19 +01001834 new_pmd = kvm_s2pmd_mkexec(new_pmd);
Marc Zyngiera15f6932017-10-23 17:11:15 +01001835
Christoffer Dallad361f02012-11-01 17:14:45 +01001836 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1837 } else {
Punit Agrawalf8df7332018-12-11 17:10:36 +00001838 pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
Mario Smarduch15a49a42015-01-15 15:58:58 -08001839
Christoffer Dallad361f02012-11-01 17:14:45 +01001840 if (writable) {
Catalin Marinas06485052016-04-13 17:57:37 +01001841 new_pte = kvm_s2pte_mkwrite(new_pte);
Mario Smarduch15a49a42015-01-15 15:58:58 -08001842 mark_page_dirty(kvm, gfn);
Christoffer Dallad361f02012-11-01 17:14:45 +01001843 }
Marc Zyngiera9c0e122017-10-23 17:11:20 +01001844
Punit Agrawal6396b852018-12-11 17:10:35 +00001845 if (needs_exec)
Marc Zyngierd0e22b42017-10-23 17:11:19 +01001846 new_pte = kvm_s2pte_mkexec(new_pte);
Marc Zyngiera15f6932017-10-23 17:11:15 +01001847
Mario Smarduch15a49a42015-01-15 15:58:58 -08001848 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001849 }
Christoffer Dallad361f02012-11-01 17:14:45 +01001850
Christoffer Dall94f8e642013-01-20 18:28:12 -05001851out_unlock:
Christoffer Dallad361f02012-11-01 17:14:45 +01001852 spin_unlock(&kvm->mmu_lock);
Marc Zyngier35307b92015-03-12 18:16:51 +00001853 kvm_set_pfn_accessed(pfn);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001854 kvm_release_pfn_clean(pfn);
Christoffer Dallad361f02012-11-01 17:14:45 +01001855 return ret;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001856}
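/*
 * In short, user_mem_abort() picks the stage-2 mapping size as follows
 * (descriptive summary only): dirty logging or a misaligned memslot
 * forces PAGE_SIZE; a hugetlbfs VMA yields its native PMD or PUD size;
 * and a PAGE_SIZE fault backed by a transparent huge page is promoted to
 * PMD_SIZE by transparent_hugepage_adjust().
 */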
1857
Marc Zyngieraeda9132015-03-12 18:16:52 +00001858/*
1859 * Resolve the access fault by making the page young again.
1860 * Note that because the faulting entry is guaranteed not to be
1861 * cached in the TLB, we don't need to invalidate anything.
Catalin Marinas06485052016-04-13 17:57:37 +01001862 * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
1863 * so there is no need for atomic (pte|pmd)_mkyoung operations.
Marc Zyngieraeda9132015-03-12 18:16:52 +00001864 */
1865static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
1866{
Punit Agrawaleb3f06242018-12-11 17:10:39 +00001867 pud_t *pud;
Marc Zyngieraeda9132015-03-12 18:16:52 +00001868 pmd_t *pmd;
1869 pte_t *pte;
Dan Williamsba049e92016-01-15 16:56:11 -08001870 kvm_pfn_t pfn;
Marc Zyngieraeda9132015-03-12 18:16:52 +00001871 bool pfn_valid = false;
1872
1873 trace_kvm_access_fault(fault_ipa);
1874
1875 spin_lock(&vcpu->kvm->mmu_lock);
1876
Punit Agrawaleb3f06242018-12-11 17:10:39 +00001877 if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte))
Marc Zyngieraeda9132015-03-12 18:16:52 +00001878 goto out;
1879
Punit Agrawaleb3f06242018-12-11 17:10:39 +00001880 if (pud) { /* HugeTLB */
1881 *pud = kvm_s2pud_mkyoung(*pud);
1882 pfn = kvm_pud_pfn(*pud);
1883 pfn_valid = true;
1884 } else if (pmd) { /* THP, HugeTLB */
Marc Zyngieraeda9132015-03-12 18:16:52 +00001885 *pmd = pmd_mkyoung(*pmd);
1886 pfn = pmd_pfn(*pmd);
1887 pfn_valid = true;
Punit Agrawaleb3f06242018-12-11 17:10:39 +00001888 } else {
1889 *pte = pte_mkyoung(*pte); /* Just a page... */
1890 pfn = pte_pfn(*pte);
1891 pfn_valid = true;
Marc Zyngieraeda9132015-03-12 18:16:52 +00001892 }
1893
Marc Zyngieraeda9132015-03-12 18:16:52 +00001894out:
1895 spin_unlock(&vcpu->kvm->mmu_lock);
1896 if (pfn_valid)
1897 kvm_set_pfn_accessed(pfn);
1898}
1899
Christoffer Dall94f8e642013-01-20 18:28:12 -05001900/**
1901 * kvm_handle_guest_abort - handles all 2nd stage aborts
1902 * @vcpu: the VCPU pointer
1903 * @run: the kvm_run structure
1904 *
1905 * Any abort that gets to the host is almost guaranteed to be caused by a
 1906 * missing second stage translation table entry, which can mean either that the
 1907 * guest simply needs more memory and we must allocate an appropriate page, or
 1908 * that the guest tried to access I/O memory, which is emulated by user
1909 * space. The distinction is based on the IPA causing the fault and whether this
1910 * memory region has been registered as standard RAM by user space.
1911 */
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001912int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1913{
Christoffer Dall94f8e642013-01-20 18:28:12 -05001914 unsigned long fault_status;
1915 phys_addr_t fault_ipa;
1916 struct kvm_memory_slot *memslot;
Christoffer Dall98047882014-08-19 12:18:04 +02001917 unsigned long hva;
1918 bool is_iabt, write_fault, writable;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001919 gfn_t gfn;
1920 int ret, idx;
1921
Tyler Baicar621f48e2017-06-21 12:17:14 -06001922 fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1923
1924 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
James Morsebb428922017-07-18 13:37:41 +01001925 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
Tyler Baicar621f48e2017-06-21 12:17:14 -06001926
James Morsebb428922017-07-18 13:37:41 +01001927 /* Synchronous External Abort? */
1928 if (kvm_vcpu_dabt_isextabt(vcpu)) {
1929 /*
1930 * For RAS the host kernel may handle this abort.
1931 * There is no need to pass the error into the guest.
1932 */
James Morse0db5e022019-01-29 18:48:49 +00001933 if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
Tyler Baicar621f48e2017-06-21 12:17:14 -06001934 return 1;
Tyler Baicar621f48e2017-06-21 12:17:14 -06001935
James Morsebb428922017-07-18 13:37:41 +01001936 if (unlikely(!is_iabt)) {
1937 kvm_inject_vabt(vcpu);
1938 return 1;
1939 }
Marc Zyngier40557102016-09-06 14:02:15 +01001940 }
1941
Marc Zyngier7393b592012-09-17 19:27:09 +01001942 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
1943 kvm_vcpu_get_hfar(vcpu), fault_ipa);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001944
1945 /* Check the stage-2 fault is trans. fault or write fault */
Marc Zyngier35307b92015-03-12 18:16:51 +00001946 if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
1947 fault_status != FSC_ACCESS) {
Christoffer Dall0496daa52014-09-26 12:29:34 +02001948 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1949 kvm_vcpu_trap_get_class(vcpu),
1950 (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1951 (unsigned long)kvm_vcpu_get_hsr(vcpu));
Christoffer Dall94f8e642013-01-20 18:28:12 -05001952 return -EFAULT;
1953 }
1954
1955 idx = srcu_read_lock(&vcpu->kvm->srcu);
1956
1957 gfn = fault_ipa >> PAGE_SHIFT;
Christoffer Dall98047882014-08-19 12:18:04 +02001958 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1959 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
Ard Biesheuvela7d079c2014-09-09 11:27:09 +01001960 write_fault = kvm_is_write_fault(vcpu);
Christoffer Dall98047882014-08-19 12:18:04 +02001961 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
Christoffer Dall94f8e642013-01-20 18:28:12 -05001962 if (is_iabt) {
1963 /* Prefetch Abort on I/O address */
Marc Zyngier7393b592012-09-17 19:27:09 +01001964 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
Christoffer Dall94f8e642013-01-20 18:28:12 -05001965 ret = 1;
1966 goto out_unlock;
1967 }
1968
Marc Zyngiercfe39502012-12-12 14:42:09 +00001969 /*
Marc Zyngier57c841f2016-01-29 15:01:28 +00001970 * Check for a cache maintenance operation. Since we
1971 * ended-up here, we know it is outside of any memory
1972 * slot. But we can't find out if that is for a device,
1973 * or if the guest is just being stupid. The only thing
1974 * we know for sure is that this range cannot be cached.
1975 *
1976 * So let's assume that the guest is just being
1977 * cautious, and skip the instruction.
1978 */
1979 if (kvm_vcpu_dabt_is_cm(vcpu)) {
1980 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1981 ret = 1;
1982 goto out_unlock;
1983 }
1984
1985 /*
Marc Zyngiercfe39502012-12-12 14:42:09 +00001986 * The IPA is reported as [MAX:12], so we need to
1987 * complement it with the bottom 12 bits from the
1988 * faulting VA. This is always 12 bits, irrespective
1989 * of the page size.
1990 */
1991 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
Christoffer Dall45e96ea2013-01-20 18:43:58 -05001992 ret = io_mem_abort(vcpu, run, fault_ipa);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001993 goto out_unlock;
1994 }
1995
Christoffer Dallc3058d52014-10-10 12:14:29 +02001996 /* Userspace should not be able to register out-of-bounds IPAs */
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001997 VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
Christoffer Dallc3058d52014-10-10 12:14:29 +02001998
Marc Zyngieraeda9132015-03-12 18:16:52 +00001999 if (fault_status == FSC_ACCESS) {
2000 handle_access_fault(vcpu, fault_ipa);
2001 ret = 1;
2002 goto out_unlock;
2003 }
2004
Christoffer Dall98047882014-08-19 12:18:04 +02002005 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
Christoffer Dall94f8e642013-01-20 18:28:12 -05002006 if (ret == 0)
2007 ret = 1;
2008out_unlock:
2009 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2010 return ret;
Christoffer Dall342cd0a2013-01-20 18:28:06 -05002011}
2012
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00002013static int handle_hva_to_gpa(struct kvm *kvm,
2014 unsigned long start,
2015 unsigned long end,
2016 int (*handler)(struct kvm *kvm,
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002017 gpa_t gpa, u64 size,
2018 void *data),
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00002019 void *data)
Christoffer Dalld5d81842013-01-20 18:28:07 -05002020{
2021 struct kvm_memslots *slots;
2022 struct kvm_memory_slot *memslot;
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00002023 int ret = 0;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002024
2025 slots = kvm_memslots(kvm);
2026
2027 /* we only care about the pages that the guest sees */
2028 kvm_for_each_memslot(memslot, slots) {
2029 unsigned long hva_start, hva_end;
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002030 gfn_t gpa;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002031
2032 hva_start = max(start, memslot->userspace_addr);
2033 hva_end = min(end, memslot->userspace_addr +
2034 (memslot->npages << PAGE_SHIFT));
2035 if (hva_start >= hva_end)
2036 continue;
2037
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002038 gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
2039 ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
Christoffer Dalld5d81842013-01-20 18:28:07 -05002040 }
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00002041
2042 return ret;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002043}
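/*
 * Worked example (illustrative numbers): a memslot with
 * userspace_addr = 0x7f0040000000 and base_gfn = 0x40000 translates an
 * MMU notifier range 0x7f0040123000..0x7f0040125000 into
 * gpa = 0x40123000 with size = 0x2000, which is what the handler sees.
 */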
2044
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002045static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
Christoffer Dalld5d81842013-01-20 18:28:07 -05002046{
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002047 unmap_stage2_range(kvm, gpa, size);
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00002048 return 0;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002049}
2050
Christoffer Dalld5d81842013-01-20 18:28:07 -05002051int kvm_unmap_hva_range(struct kvm *kvm,
2052 unsigned long start, unsigned long end)
2053{
2054 if (!kvm->arch.pgd)
2055 return 0;
2056
2057 trace_kvm_unmap_hva_range(start, end);
2058 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
2059 return 0;
2060}
2061
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002062static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
Christoffer Dalld5d81842013-01-20 18:28:07 -05002063{
2064 pte_t *pte = (pte_t *)data;
2065
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002066 WARN_ON(size != PAGE_SIZE);
Mario Smarduch15a49a42015-01-15 15:58:58 -08002067 /*
2068 * We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
2069 * flag clear because MMU notifiers will have unmapped a huge PMD before
2070 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
2071 * therefore stage2_set_pte() never needs to clear out a huge PMD
2072 * through this calling path.
2073 */
2074 stage2_set_pte(kvm, NULL, gpa, pte, 0);
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00002075 return 0;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002076}
2077
2078
Lan Tianyu748c0e32018-12-06 21:21:10 +08002079int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
Christoffer Dalld5d81842013-01-20 18:28:07 -05002080{
2081 unsigned long end = hva + PAGE_SIZE;
Marc Zyngier694556d2018-08-23 09:58:27 +01002082 kvm_pfn_t pfn = pte_pfn(pte);
Christoffer Dalld5d81842013-01-20 18:28:07 -05002083 pte_t stage2_pte;
2084
2085 if (!kvm->arch.pgd)
Lan Tianyu748c0e32018-12-06 21:21:10 +08002086 return 0;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002087
2088 trace_kvm_set_spte_hva(hva);
Marc Zyngier694556d2018-08-23 09:58:27 +01002089
2090 /*
2091 * We've moved a page around, probably through CoW, so let's treat it
2092 * just like a translation fault and clean the cache to the PoC.
2093 */
2094 clean_dcache_guest_page(pfn, PAGE_SIZE);
Punit Agrawalf8df7332018-12-11 17:10:36 +00002095 stage2_pte = kvm_pfn_pte(pfn, PAGE_S2);
Christoffer Dalld5d81842013-01-20 18:28:07 -05002096 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
Lan Tianyu748c0e32018-12-06 21:21:10 +08002097
2098 return 0;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002099}
2100
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002101static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
Marc Zyngier35307b92015-03-12 18:16:51 +00002102{
Punit Agrawal35a63962018-12-11 17:10:40 +00002103 pud_t *pud;
Marc Zyngier35307b92015-03-12 18:16:51 +00002104 pmd_t *pmd;
2105 pte_t *pte;
2106
Punit Agrawal35a63962018-12-11 17:10:40 +00002107 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
2108 if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
Marc Zyngier35307b92015-03-12 18:16:51 +00002109 return 0;
2110
Punit Agrawal35a63962018-12-11 17:10:40 +00002111 if (pud)
2112 return stage2_pudp_test_and_clear_young(pud);
2113 else if (pmd)
Catalin Marinas06485052016-04-13 17:57:37 +01002114 return stage2_pmdp_test_and_clear_young(pmd);
Punit Agrawal35a63962018-12-11 17:10:40 +00002115 else
2116 return stage2_ptep_test_and_clear_young(pte);
Marc Zyngier35307b92015-03-12 18:16:51 +00002117}
2118
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002119static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
Marc Zyngier35307b92015-03-12 18:16:51 +00002120{
Punit Agrawal35a63962018-12-11 17:10:40 +00002121 pud_t *pud;
Marc Zyngier35307b92015-03-12 18:16:51 +00002122 pmd_t *pmd;
2123 pte_t *pte;
2124
Punit Agrawal35a63962018-12-11 17:10:40 +00002125 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
2126 if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
Marc Zyngier35307b92015-03-12 18:16:51 +00002127 return 0;
2128
Punit Agrawal35a63962018-12-11 17:10:40 +00002129 if (pud)
2130 return kvm_s2pud_young(*pud);
2131 else if (pmd)
Marc Zyngier35307b92015-03-12 18:16:51 +00002132 return pmd_young(*pmd);
Punit Agrawal35a63962018-12-11 17:10:40 +00002133 else
Marc Zyngier35307b92015-03-12 18:16:51 +00002134 return pte_young(*pte);
Marc Zyngier35307b92015-03-12 18:16:51 +00002135}
2136
2137int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
2138{
Suzuki K Poulose7e5a6722017-07-05 09:57:00 +01002139 if (!kvm->arch.pgd)
2140 return 0;
Marc Zyngier35307b92015-03-12 18:16:51 +00002141 trace_kvm_age_hva(start, end);
2142 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
2143}
2144
2145int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
2146{
Suzuki K Poulose7e5a6722017-07-05 09:57:00 +01002147 if (!kvm->arch.pgd)
2148 return 0;
Marc Zyngier35307b92015-03-12 18:16:51 +00002149 trace_kvm_test_age_hva(hva);
2150 return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
2151}
2152
Christoffer Dalld5d81842013-01-20 18:28:07 -05002153void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
2154{
2155 mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
2156}
2157
Christoffer Dall342cd0a2013-01-20 18:28:06 -05002158phys_addr_t kvm_mmu_get_httbr(void)
2159{
Ard Biesheuvele4c5a682015-03-19 16:42:28 +00002160 if (__kvm_cpu_uses_extended_idmap())
2161 return virt_to_phys(merged_hyp_pgd);
2162 else
2163 return virt_to_phys(hyp_pgd);
Christoffer Dall342cd0a2013-01-20 18:28:06 -05002164}
2165
Marc Zyngier5a677ce2013-04-12 19:12:06 +01002166phys_addr_t kvm_get_idmap_vector(void)
2167{
2168 return hyp_idmap_vector;
2169}
2170
Marc Zyngier0535a3e2016-06-30 18:40:43 +01002171static int kvm_map_idmap_text(pgd_t *pgd)
2172{
2173 int err;
2174
2175 /* Create the idmap in the boot page tables */
Kristina Martsenko98732d12018-01-15 15:23:49 +00002176 err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
Marc Zyngier0535a3e2016-06-30 18:40:43 +01002177 hyp_idmap_start, hyp_idmap_end,
2178 __phys_to_pfn(hyp_idmap_start),
2179 PAGE_HYP_EXEC);
2180 if (err)
2181 kvm_err("Failed to idmap %lx-%lx\n",
2182 hyp_idmap_start, hyp_idmap_end);
2183
2184 return err;
2185}
2186
Christoffer Dall342cd0a2013-01-20 18:28:06 -05002187int kvm_mmu_init(void)
2188{
Marc Zyngier2fb41052013-04-12 19:12:03 +01002189 int err;
2190
Santosh Shilimkar4fda3422013-11-19 14:59:12 -05002191 hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
Marc Zyngier46fef152018-03-12 14:25:10 +00002192 hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
Santosh Shilimkar4fda3422013-11-19 14:59:12 -05002193 hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
Marc Zyngier46fef152018-03-12 14:25:10 +00002194 hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
Santosh Shilimkar4fda3422013-11-19 14:59:12 -05002195 hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
Marc Zyngier5a677ce2013-04-12 19:12:06 +01002196
Ard Biesheuvel06f75a12015-03-19 16:42:26 +00002197 /*
2198 * We rely on the linker script to ensure at build time that the HYP
2199 * init code does not cross a page boundary.
2200 */
2201 BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
Marc Zyngier5a677ce2013-04-12 19:12:06 +01002202
Marc Zyngierb4ef0492017-12-03 20:04:51 +00002203 kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
2204 kvm_debug("HYP VA range: %lx:%lx\n",
2205 kern_hyp_va(PAGE_OFFSET),
2206 kern_hyp_va((unsigned long)high_memory - 1));
Marc Zyngiereac378a2016-06-30 18:40:50 +01002207
Marc Zyngier6c41a412016-06-30 18:40:51 +01002208 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
Marc Zyngiered57cac2017-12-03 18:22:49 +00002209 hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) &&
Marc Zyngierd2896d42016-08-22 09:01:17 +01002210 hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
Marc Zyngiereac378a2016-06-30 18:40:50 +01002211 /*
2212 * The idmap page is intersecting with the VA space,
2213 * it is not safe to continue further.
2214 */
2215 kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
2216 err = -EINVAL;
2217 goto out;
2218 }
2219
Christoffer Dall38f791a2014-10-10 12:14:28 +02002220 hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
Marc Zyngier0535a3e2016-06-30 18:40:43 +01002221 if (!hyp_pgd) {
Christoffer Dalld5d81842013-01-20 18:28:07 -05002222 kvm_err("Hyp mode PGD not allocated\n");
Marc Zyngier2fb41052013-04-12 19:12:03 +01002223 err = -ENOMEM;
2224 goto out;
2225 }
2226
Ard Biesheuvele4c5a682015-03-19 16:42:28 +00002227 if (__kvm_cpu_uses_extended_idmap()) {
Marc Zyngier0535a3e2016-06-30 18:40:43 +01002228 boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2229 hyp_pgd_order);
2230 if (!boot_hyp_pgd) {
2231 kvm_err("Hyp boot PGD not allocated\n");
2232 err = -ENOMEM;
2233 goto out;
2234 }
2235
2236 err = kvm_map_idmap_text(boot_hyp_pgd);
2237 if (err)
2238 goto out;
2239
Ard Biesheuvele4c5a682015-03-19 16:42:28 +00002240 merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
2241 if (!merged_hyp_pgd) {
2242 kvm_err("Failed to allocate extra HYP pgd\n");
2243 goto out;
2244 }
2245 __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
2246 hyp_idmap_start);
Marc Zyngier0535a3e2016-06-30 18:40:43 +01002247 } else {
2248 err = kvm_map_idmap_text(hyp_pgd);
2249 if (err)
2250 goto out;
Marc Zyngier5a677ce2013-04-12 19:12:06 +01002251 }
2252
Marc Zyngiere3f019b2017-12-04 17:04:38 +00002253 io_map_base = hyp_idmap_start;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002254 return 0;
Marc Zyngier2fb41052013-04-12 19:12:03 +01002255out:
Marc Zyngier4f728272013-04-12 19:12:05 +01002256 free_hyp_pgds();
Marc Zyngier2fb41052013-04-12 19:12:03 +01002257 return err;
Christoffer Dall342cd0a2013-01-20 18:28:06 -05002258}
Eric Augerdf6ce242014-06-06 11:10:23 +02002259
2260void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02002261 const struct kvm_userspace_memory_region *mem,
Eric Augerdf6ce242014-06-06 11:10:23 +02002262 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02002263 const struct kvm_memory_slot *new,
Eric Augerdf6ce242014-06-06 11:10:23 +02002264 enum kvm_mr_change change)
2265{
Mario Smarduchc6473552015-01-15 15:58:56 -08002266 /*
 2267 * At this point the memslot has been committed and there is an
 2268 * allocated dirty_bitmap[]; dirty pages will be tracked while the
2269 * memory slot is write protected.
2270 */
2271 if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
2272 kvm_mmu_wp_memory_region(kvm, mem->slot);
Eric Augerdf6ce242014-06-06 11:10:23 +02002273}
2274
2275int kvm_arch_prepare_memory_region(struct kvm *kvm,
2276 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02002277 const struct kvm_userspace_memory_region *mem,
Eric Augerdf6ce242014-06-06 11:10:23 +02002278 enum kvm_mr_change change)
2279{
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002280 hva_t hva = mem->userspace_addr;
2281 hva_t reg_end = hva + mem->memory_size;
2282 bool writable = !(mem->flags & KVM_MEM_READONLY);
2283 int ret = 0;
2284
Mario Smarduch15a49a42015-01-15 15:58:58 -08002285 if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
2286 change != KVM_MR_FLAGS_ONLY)
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002287 return 0;
2288
2289 /*
Christoffer Dallc3058d52014-10-10 12:14:29 +02002290 * Prevent userspace from creating a memory region outside of the IPA
2291 * space addressable by the KVM guest IPA space.
2292 */
2293 if (memslot->base_gfn + memslot->npages >=
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01002294 (kvm_phys_size(kvm) >> PAGE_SHIFT))
Christoffer Dallc3058d52014-10-10 12:14:29 +02002295 return -EFAULT;
2296
Marc Zyngier72f31042017-03-16 18:20:50 +00002297 down_read(&current->mm->mmap_sem);
Christoffer Dallc3058d52014-10-10 12:14:29 +02002298 /*
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002299 * A memory region could potentially cover multiple VMAs, and any holes
2300 * between them, so iterate over all of them to find out if we can map
2301 * any of them right now.
2302 *
2303 * +--------------------------------------------+
2304 * +---------------+----------------+ +----------------+
2305 * | : VMA 1 | VMA 2 | | VMA 3 : |
2306 * +---------------+----------------+ +----------------+
2307 * | memory region |
2308 * +--------------------------------------------+
2309 */
2310 do {
2311 struct vm_area_struct *vma = find_vma(current->mm, hva);
2312 hva_t vm_start, vm_end;
2313
2314 if (!vma || vma->vm_start >= reg_end)
2315 break;
2316
2317 /*
2318 * Mapping a read-only VMA is only allowed if the
2319 * memory region is configured as read-only.
2320 */
2321 if (writable && !(vma->vm_flags & VM_WRITE)) {
2322 ret = -EPERM;
2323 break;
2324 }
2325
2326 /*
2327 * Take the intersection of this VMA with the memory region
2328 */
2329 vm_start = max(hva, vma->vm_start);
2330 vm_end = min(reg_end, vma->vm_end);
2331
2332 if (vma->vm_flags & VM_PFNMAP) {
2333 gpa_t gpa = mem->guest_phys_addr +
2334 (vm_start - mem->userspace_addr);
Marek Majtykaca09f022015-09-16 12:04:55 +02002335 phys_addr_t pa;
2336
2337 pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
2338 pa += vm_start - vma->vm_start;
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002339
Mario Smarduch15a49a42015-01-15 15:58:58 -08002340 /* IO region dirty page logging not allowed */
Marc Zyngier72f31042017-03-16 18:20:50 +00002341 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
2342 ret = -EINVAL;
2343 goto out;
2344 }
Mario Smarduch15a49a42015-01-15 15:58:58 -08002345
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002346 ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
2347 vm_end - vm_start,
2348 writable);
2349 if (ret)
2350 break;
2351 }
2352 hva = vm_end;
2353 } while (hva < reg_end);
2354
Mario Smarduch15a49a42015-01-15 15:58:58 -08002355 if (change == KVM_MR_FLAGS_ONLY)
Marc Zyngier72f31042017-03-16 18:20:50 +00002356 goto out;
Mario Smarduch15a49a42015-01-15 15:58:58 -08002357
Ard Biesheuvel849260c2014-11-17 14:58:53 +00002358 spin_lock(&kvm->mmu_lock);
2359 if (ret)
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002360 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
Ard Biesheuvel849260c2014-11-17 14:58:53 +00002361 else
2362 stage2_flush_memslot(kvm, memslot);
2363 spin_unlock(&kvm->mmu_lock);
Marc Zyngier72f31042017-03-16 18:20:50 +00002364out:
2365 up_read(&current->mm->mmap_sem);
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002366 return ret;
Eric Augerdf6ce242014-06-06 11:10:23 +02002367}
2368
2369void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
2370 struct kvm_memory_slot *dont)
2371{
2372}
2373
2374int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2375 unsigned long npages)
2376{
2377 return 0;
2378}
2379
Sean Christopherson15248252019-02-05 12:54:17 -08002380void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
Eric Augerdf6ce242014-06-06 11:10:23 +02002381{
2382}
2383
2384void kvm_arch_flush_shadow_all(struct kvm *kvm)
2385{
Suzuki K Poulose293f2932016-09-08 16:25:49 +01002386 kvm_free_stage2_pgd(kvm);
Eric Augerdf6ce242014-06-06 11:10:23 +02002387}
2388
2389void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
2390 struct kvm_memory_slot *slot)
2391{
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002392 gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
2393 phys_addr_t size = slot->npages << PAGE_SHIFT;
2394
2395 spin_lock(&kvm->mmu_lock);
2396 unmap_stage2_range(kvm, gpa, size);
2397 spin_unlock(&kvm->mmu_lock);
Eric Augerdf6ce242014-06-06 11:10:23 +02002398}
Marc Zyngier3c1e7162014-12-19 16:05:31 +00002399
2400/*
2401 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
2402 *
2403 * Main problems:
2404 * - S/W ops are local to a CPU (not broadcast)
2405 * - We have line migration behind our back (speculation)
2406 * - System caches don't support S/W at all (damn!)
2407 *
2408 * In the face of the above, the best we can do is to try and convert
2409 * S/W ops to VA ops. Because the guest is not allowed to infer the
2410 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
2411 * which is a rather good thing for us.
2412 *
2413 * Also, it is only used when turning caches on/off ("The expected
2414 * usage of the cache maintenance instructions that operate by set/way
2415 * is associated with the cache maintenance instructions associated
2416 * with the powerdown and powerup of caches, if this is required by
2417 * the implementation.").
2418 *
2419 * We use the following policy:
2420 *
2421 * - If we trap a S/W operation, we enable VM trapping to detect
2422 * caches being turned on/off, and do a full clean.
2423 *
2424 * - We flush the caches on both caches being turned on and off.
2425 *
2426 * - Once the caches are enabled, we stop trapping VM ops.
2427 */
2428void kvm_set_way_flush(struct kvm_vcpu *vcpu)
2429{
Christoffer Dall3df59d82017-08-03 12:09:05 +02002430 unsigned long hcr = *vcpu_hcr(vcpu);
Marc Zyngier3c1e7162014-12-19 16:05:31 +00002431
2432 /*
2433 * If this is the first time we do a S/W operation
2434 * (i.e. HCR_TVM not set) flush the whole memory, and set the
2435 * VM trapping.
2436 *
2437 * Otherwise, rely on the VM trapping to wait for the MMU +
2438 * Caches to be turned off. At that point, we'll be able to
2439 * clean the caches again.
2440 */
2441 if (!(hcr & HCR_TVM)) {
2442 trace_kvm_set_way_flush(*vcpu_pc(vcpu),
2443 vcpu_has_cache_enabled(vcpu));
2444 stage2_flush_vm(vcpu->kvm);
Christoffer Dall3df59d82017-08-03 12:09:05 +02002445 *vcpu_hcr(vcpu) = hcr | HCR_TVM;
Marc Zyngier3c1e7162014-12-19 16:05:31 +00002446 }
2447}
2448
2449void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
2450{
2451 bool now_enabled = vcpu_has_cache_enabled(vcpu);
2452
2453 /*
2454 * If switching the MMU+caches on, need to invalidate the caches.
2455 * If switching it off, need to clean the caches.
2456 * Clean + invalidate does the trick always.
2457 */
2458 if (now_enabled != was_enabled)
2459 stage2_flush_vm(vcpu->kvm);
2460
2461 /* Caches are now on, stop trapping VM ops (until a S/W op) */
2462 if (now_enabled)
Christoffer Dall3df59d82017-08-03 12:09:05 +02002463 *vcpu_hcr(vcpu) &= ~HCR_TVM;
Marc Zyngier3c1e7162014-12-19 16:05:31 +00002464
2465 trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
2466}
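/*
 * Putting the two helpers above together, a typical sequence looks like
 * this (descriptive sketch): the guest issues a set/way cache op, so we
 * clean/invalidate the caches for the whole guest address space and set
 * HCR_TVM in kvm_set_way_flush(); the guest then turns its caches off and
 * the trapped SCTLR write lands in kvm_toggle_cache(), which flushes
 * again; once the guest re-enables its caches we flush one last time and
 * drop HCR_TVM so that VM register traps stop.
 */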