Christoffer Dall749cf76c2013-01-20 18:28:06 -05001/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
Christoffer Dall342cd0a2013-01-20 18:28:06 -050018
19#include <linux/mman.h>
20#include <linux/kvm_host.h>
21#include <linux/io.h>
Christoffer Dallad361f02012-11-01 17:14:45 +010022#include <linux/hugetlb.h>
James Morse196f8782017-06-20 17:11:48 +010023#include <linux/sched/signal.h>
Christoffer Dall45e96ea2013-01-20 18:43:58 -050024#include <trace/events/kvm.h>
Christoffer Dall342cd0a2013-01-20 18:28:06 -050025#include <asm/pgalloc.h>
Christoffer Dall94f8e642013-01-20 18:28:12 -050026#include <asm/cacheflush.h>
Christoffer Dall342cd0a2013-01-20 18:28:06 -050027#include <asm/kvm_arm.h>
28#include <asm/kvm_mmu.h>
Christoffer Dall45e96ea2013-01-20 18:43:58 -050029#include <asm/kvm_mmio.h>
James Morse0db5e022019-01-29 18:48:49 +000030#include <asm/kvm_ras.h>
Christoffer Dalld5d81842013-01-20 18:28:07 -050031#include <asm/kvm_asm.h>
Christoffer Dall94f8e642013-01-20 18:28:12 -050032#include <asm/kvm_emulate.h>
Marc Zyngier1e947ba2015-01-29 11:59:54 +000033#include <asm/virt.h>
Christoffer Dalld5d81842013-01-20 18:28:07 -050034
35#include "trace.h"
Christoffer Dall342cd0a2013-01-20 18:28:06 -050036
Marc Zyngier5a677ce2013-04-12 19:12:06 +010037static pgd_t *boot_hyp_pgd;
Marc Zyngier2fb41052013-04-12 19:12:03 +010038static pgd_t *hyp_pgd;
Ard Biesheuvele4c5a682015-03-19 16:42:28 +000039static pgd_t *merged_hyp_pgd;
Christoffer Dall342cd0a2013-01-20 18:28:06 -050040static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
41
Marc Zyngier5a677ce2013-04-12 19:12:06 +010042static unsigned long hyp_idmap_start;
43static unsigned long hyp_idmap_end;
44static phys_addr_t hyp_idmap_vector;
45
Marc Zyngiere3f019b2017-12-04 17:04:38 +000046static unsigned long io_map_base;
47
Christoffer Dall38f791a2014-10-10 12:14:28 +020048#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
Mark Salter5d4e08c2014-03-28 14:25:19 +000049
Mario Smarduch15a49a42015-01-15 15:58:58 -080050#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
51#define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
52
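/*
 * Dirty page logging is implemented by write-protecting stage-2 entries,
 * so it is only relevant for slots that carry a dirty bitmap and are not
 * read-only (a read-only slot can never be dirtied by the guest).
 */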
53static bool memslot_is_logging(struct kvm_memory_slot *memslot)
54{
Mario Smarduch15a49a42015-01-15 15:58:58 -080055 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
Mario Smarduch72760302015-01-15 15:59:01 -080056}
57
58/**
59 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
60 * @kvm: pointer to kvm structure.
61 *
62 * Interface to HYP function to flush all VM TLB entries
63 */
64void kvm_flush_remote_tlbs(struct kvm *kvm)
65{
66 kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
Mario Smarduch15a49a42015-01-15 15:58:58 -080067}
Christoffer Dallad361f02012-11-01 17:14:45 +010068
Marc Zyngier48762762013-01-28 15:27:00 +000069static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
Christoffer Dalld5d81842013-01-20 18:28:07 -050070{
Suzuki K Poulose8684e702016-03-22 17:14:25 +000071 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
Christoffer Dalld5d81842013-01-20 18:28:07 -050072}
73
Marc Zyngier363ef892014-12-19 16:48:06 +000074/*
75 * D-Cache management functions. They take the page table entries by
76 * value, as they are flushing the cache using the kernel mapping (or
77 * kmap on 32bit).
78 */
79static void kvm_flush_dcache_pte(pte_t pte)
80{
81 __kvm_flush_dcache_pte(pte);
82}
83
84static void kvm_flush_dcache_pmd(pmd_t pmd)
85{
86 __kvm_flush_dcache_pmd(pmd);
87}
88
89static void kvm_flush_dcache_pud(pud_t pud)
90{
91 __kvm_flush_dcache_pud(pud);
92}
93
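/*
 * pfns without a valid struct page backing (e.g. MMIO) are treated as
 * device memory; the unmap and flush paths below skip D-cache maintenance
 * for such pages (see the "No need to invalidate" comment in
 * unmap_stage2_ptes()).
 */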
Ard Biesheuvele6fab542015-11-10 15:11:20 +010094static bool kvm_is_device_pfn(unsigned long pfn)
95{
96 return !pfn_valid(pfn);
97}
98
Mario Smarduch15a49a42015-01-15 15:58:58 -080099/**
100 * stage2_dissolve_pmd() - clear and flush huge PMD entry
101 * @kvm: pointer to kvm structure.
102 * @addr: IPA
103 * @pmd: pmd pointer for IPA
104 *
Zenghui Yu8324c3d2019-03-25 08:02:05 +0000105 * Function clears a PMD entry and flushes the 1st and 2nd stage TLBs for @addr.
Mario Smarduch15a49a42015-01-15 15:58:58 -0800106 */
107static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
108{
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +0000109 if (!pmd_thp_or_huge(*pmd))
Mario Smarduch15a49a42015-01-15 15:58:58 -0800110 return;
111
112 pmd_clear(pmd);
113 kvm_tlb_flush_vmid_ipa(kvm, addr);
114 put_page(virt_to_page(pmd));
115}
116
Punit Agrawalb8e0ba72018-12-11 17:10:41 +0000117/**
118 * stage2_dissolve_pud() - clear and flush huge PUD entry
119 * @kvm: pointer to kvm structure.
120 * @addr: IPA
 121 * @pudp: pud pointer for IPA
122 *
Zenghui Yu8324c3d2019-03-25 08:02:05 +0000123 * Function clears a PUD entry and flushes the 1st and 2nd stage TLBs for @addr.
Punit Agrawalb8e0ba72018-12-11 17:10:41 +0000124 */
125static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
126{
127 if (!stage2_pud_huge(kvm, *pudp))
128 return;
129
130 stage2_pud_clear(kvm, pudp);
131 kvm_tlb_flush_vmid_ipa(kvm, addr);
132 put_page(virt_to_page(pudp));
133}
134
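/*
 * Stage-2 table pages are drawn from a small per-caller cache so that no
 * allocation happens while the mmu_lock spinlock is held: the cache is
 * topped up from a sleepable context and then consumed under the lock.
 * A minimal sketch of the pattern, mirroring kvm_phys_addr_ioremap() below:
 *
 *	mmu_topup_memory_cache(&cache, kvm_mmu_cache_min_pages(kvm),
 *			       KVM_NR_MEM_OBJS);
 *	spin_lock(&kvm->mmu_lock);
 *	stage2_set_pte(kvm, &cache, addr, &pte, flags);
 *	spin_unlock(&kvm->mmu_lock);
 */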
Christoffer Dalld5d81842013-01-20 18:28:07 -0500135static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
136 int min, int max)
137{
138 void *page;
139
140 BUG_ON(max > KVM_NR_MEM_OBJS);
141 if (cache->nobjs >= min)
142 return 0;
143 while (cache->nobjs < max) {
144 page = (void *)__get_free_page(PGALLOC_GFP);
145 if (!page)
146 return -ENOMEM;
147 cache->objects[cache->nobjs++] = page;
148 }
149 return 0;
150}
151
152static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
153{
154 while (mc->nobjs)
155 free_page((unsigned long)mc->objects[--mc->nobjs]);
156}
157
158static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
159{
160 void *p;
161
162 BUG_ON(!mc || !mc->nobjs);
163 p = mc->objects[--mc->nobjs];
164 return p;
165}
166
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000167static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
Marc Zyngier979acd52013-08-06 13:05:48 +0100168{
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100169 pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
170 stage2_pgd_clear(kvm, pgd);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200171 kvm_tlb_flush_vmid_ipa(kvm, addr);
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100172 stage2_pud_free(kvm, pud_table);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200173 put_page(virt_to_page(pgd));
Marc Zyngier979acd52013-08-06 13:05:48 +0100174}
175
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000176static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500177{
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100178 pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
179 VM_BUG_ON(stage2_pud_huge(kvm, *pud));
180 stage2_pud_clear(kvm, pud);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200181 kvm_tlb_flush_vmid_ipa(kvm, addr);
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100182 stage2_pmd_free(kvm, pmd_table);
Marc Zyngier4f728272013-04-12 19:12:05 +0100183 put_page(virt_to_page(pud));
184}
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500185
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000186static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
Marc Zyngier4f728272013-04-12 19:12:05 +0100187{
Christoffer Dall4f853a72014-05-09 23:31:31 +0200188 pte_t *pte_table = pte_offset_kernel(pmd, 0);
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +0000189 VM_BUG_ON(pmd_thp_or_huge(*pmd));
Christoffer Dall4f853a72014-05-09 23:31:31 +0200190 pmd_clear(pmd);
191 kvm_tlb_flush_vmid_ipa(kvm, addr);
192 pte_free_kernel(NULL, pte_table);
Marc Zyngier4f728272013-04-12 19:12:05 +0100193 put_page(virt_to_page(pmd));
194}
195
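/*
 * These helpers publish new descriptors with WRITE_ONCE() followed by a
 * dsb(ishst), so that the page-table walker observes the update before any
 * subsequent TLB maintenance or faulting access depends on it.
 */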
Marc Zyngier88dc25e82018-05-25 12:23:11 +0100196static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte)
197{
198 WRITE_ONCE(*ptep, new_pte);
199 dsb(ishst);
200}
201
202static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd)
203{
204 WRITE_ONCE(*pmdp, new_pmd);
205 dsb(ishst);
206}
207
Marc Zyngier0db9dd82018-06-27 15:51:05 +0100208static inline void kvm_pmd_populate(pmd_t *pmdp, pte_t *ptep)
209{
210 kvm_set_pmd(pmdp, kvm_mk_pmd(ptep));
211}
212
213static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
214{
215 WRITE_ONCE(*pudp, kvm_mk_pud(pmdp));
216 dsb(ishst);
217}
218
219static inline void kvm_pgd_populate(pgd_t *pgdp, pud_t *pudp)
220{
221 WRITE_ONCE(*pgdp, kvm_mk_pgd(pudp));
222 dsb(ishst);
223}
224
Marc Zyngier363ef892014-12-19 16:48:06 +0000225/*
226 * Unmapping vs dcache management:
227 *
228 * If a guest maps certain memory pages as uncached, all writes will
229 * bypass the data cache and go directly to RAM. However, the CPUs
230 * can still speculate reads (not writes) and fill cache lines with
231 * data.
232 *
233 * Those cache lines will be *clean* cache lines though, so a
234 * clean+invalidate operation is equivalent to an invalidate
235 * operation, because no cache lines are marked dirty.
236 *
237 * Those clean cache lines could be filled prior to an uncached write
238 * by the guest, and the cache coherent IO subsystem would therefore
239 * end up writing old data to disk.
240 *
241 * This is why right after unmapping a page/section and invalidating
242 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
243 * the IO subsystem will never hit in the cache.
Marc Zyngiere48d53a2018-04-06 12:27:28 +0100244 *
245 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
246 * we then fully enforce cacheability of RAM, no matter what the guest
247 * does.
Marc Zyngier363ef892014-12-19 16:48:06 +0000248 */
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000249static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
Christoffer Dall4f853a72014-05-09 23:31:31 +0200250 phys_addr_t addr, phys_addr_t end)
Marc Zyngier4f728272013-04-12 19:12:05 +0100251{
Christoffer Dall4f853a72014-05-09 23:31:31 +0200252 phys_addr_t start_addr = addr;
253 pte_t *pte, *start_pte;
254
255 start_pte = pte = pte_offset_kernel(pmd, addr);
256 do {
257 if (!pte_none(*pte)) {
Marc Zyngier363ef892014-12-19 16:48:06 +0000258 pte_t old_pte = *pte;
259
Christoffer Dall4f853a72014-05-09 23:31:31 +0200260 kvm_set_pte(pte, __pte(0));
Christoffer Dall4f853a72014-05-09 23:31:31 +0200261 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngier363ef892014-12-19 16:48:06 +0000262
263 /* No need to invalidate the cache for device mappings */
Ard Biesheuvel0de58f82015-12-03 09:25:22 +0100264 if (!kvm_is_device_pfn(pte_pfn(old_pte)))
Marc Zyngier363ef892014-12-19 16:48:06 +0000265 kvm_flush_dcache_pte(old_pte);
266
267 put_page(virt_to_page(pte));
Christoffer Dall4f853a72014-05-09 23:31:31 +0200268 }
269 } while (pte++, addr += PAGE_SIZE, addr != end);
270
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100271 if (stage2_pte_table_empty(kvm, start_pte))
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000272 clear_stage2_pmd_entry(kvm, pmd, start_addr);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500273}
274
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000275static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
Christoffer Dall4f853a72014-05-09 23:31:31 +0200276 phys_addr_t addr, phys_addr_t end)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500277{
Christoffer Dall4f853a72014-05-09 23:31:31 +0200278 phys_addr_t next, start_addr = addr;
279 pmd_t *pmd, *start_pmd;
Marc Zyngier000d3992013-03-05 02:43:17 +0000280
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100281 start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200282 do {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100283 next = stage2_pmd_addr_end(kvm, addr, end);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200284 if (!pmd_none(*pmd)) {
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +0000285 if (pmd_thp_or_huge(*pmd)) {
Marc Zyngier363ef892014-12-19 16:48:06 +0000286 pmd_t old_pmd = *pmd;
287
Christoffer Dall4f853a72014-05-09 23:31:31 +0200288 pmd_clear(pmd);
289 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngier363ef892014-12-19 16:48:06 +0000290
291 kvm_flush_dcache_pmd(old_pmd);
292
Christoffer Dall4f853a72014-05-09 23:31:31 +0200293 put_page(virt_to_page(pmd));
294 } else {
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000295 unmap_stage2_ptes(kvm, pmd, addr, next);
Marc Zyngier4f728272013-04-12 19:12:05 +0100296 }
297 }
Christoffer Dall4f853a72014-05-09 23:31:31 +0200298 } while (pmd++, addr = next, addr != end);
Marc Zyngier4f728272013-04-12 19:12:05 +0100299
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100300 if (stage2_pmd_table_empty(kvm, start_pmd))
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000301 clear_stage2_pud_entry(kvm, pud, start_addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200302}
303
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000304static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
Christoffer Dall4f853a72014-05-09 23:31:31 +0200305 phys_addr_t addr, phys_addr_t end)
306{
307 phys_addr_t next, start_addr = addr;
308 pud_t *pud, *start_pud;
309
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100310 start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200311 do {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100312 next = stage2_pud_addr_end(kvm, addr, end);
313 if (!stage2_pud_none(kvm, *pud)) {
314 if (stage2_pud_huge(kvm, *pud)) {
Marc Zyngier363ef892014-12-19 16:48:06 +0000315 pud_t old_pud = *pud;
316
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100317 stage2_pud_clear(kvm, pud);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200318 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngier363ef892014-12-19 16:48:06 +0000319 kvm_flush_dcache_pud(old_pud);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200320 put_page(virt_to_page(pud));
321 } else {
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000322 unmap_stage2_pmds(kvm, pud, addr, next);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200323 }
324 }
325 } while (pud++, addr = next, addr != end);
326
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100327 if (stage2_pud_table_empty(kvm, start_pud))
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000328 clear_stage2_pgd_entry(kvm, pgd, start_addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200329}
330
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000331/**
332 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
333 * @kvm: The VM pointer
334 * @start: The intermediate physical base address of the range to unmap
335 * @size: The size of the area to unmap
336 *
337 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
338 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
339 * destroying the VM), otherwise another faulting VCPU may come in and mess
340 * with things behind our backs.
341 */
342static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
Christoffer Dall4f853a72014-05-09 23:31:31 +0200343{
344 pgd_t *pgd;
345 phys_addr_t addr = start, end = start + size;
346 phys_addr_t next;
347
Suzuki K Poulose8b3405e2017-04-03 15:12:43 +0100348 assert_spin_locked(&kvm->mmu_lock);
Jia He47a91b72018-05-21 11:05:30 +0800349 WARN_ON(size & ~PAGE_MASK);
350
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100351 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200352 do {
Suzuki K Poulose0c428a6a2017-05-16 10:34:55 +0100353 /*
354 * Make sure the page table is still active, as another thread
355 * could have possibly freed the page table, while we released
356 * the lock.
357 */
358 if (!READ_ONCE(kvm->arch.pgd))
359 break;
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100360 next = stage2_pgd_addr_end(kvm, addr, end);
361 if (!stage2_pgd_none(kvm, *pgd))
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000362 unmap_stage2_puds(kvm, pgd, addr, next);
Suzuki K Poulose8b3405e2017-04-03 15:12:43 +0100363 /*
364 * If the range is too large, release the kvm->mmu_lock
365 * to prevent starvation and lockup detector warnings.
366 */
367 if (next != end)
368 cond_resched_lock(&kvm->mmu_lock);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200369 } while (pgd++, addr = next, addr != end);
Marc Zyngier000d3992013-03-05 02:43:17 +0000370}
371
Marc Zyngier9d218a12014-01-15 12:50:23 +0000372static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
373 phys_addr_t addr, phys_addr_t end)
374{
375 pte_t *pte;
376
377 pte = pte_offset_kernel(pmd, addr);
378 do {
Ard Biesheuvel0de58f82015-12-03 09:25:22 +0100379 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
Marc Zyngier363ef892014-12-19 16:48:06 +0000380 kvm_flush_dcache_pte(*pte);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000381 } while (pte++, addr += PAGE_SIZE, addr != end);
382}
383
384static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
385 phys_addr_t addr, phys_addr_t end)
386{
387 pmd_t *pmd;
388 phys_addr_t next;
389
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100390 pmd = stage2_pmd_offset(kvm, pud, addr);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000391 do {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100392 next = stage2_pmd_addr_end(kvm, addr, end);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000393 if (!pmd_none(*pmd)) {
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +0000394 if (pmd_thp_or_huge(*pmd))
Marc Zyngier363ef892014-12-19 16:48:06 +0000395 kvm_flush_dcache_pmd(*pmd);
396 else
Marc Zyngier9d218a12014-01-15 12:50:23 +0000397 stage2_flush_ptes(kvm, pmd, addr, next);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000398 }
399 } while (pmd++, addr = next, addr != end);
400}
401
402static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
403 phys_addr_t addr, phys_addr_t end)
404{
405 pud_t *pud;
406 phys_addr_t next;
407
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100408 pud = stage2_pud_offset(kvm, pgd, addr);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000409 do {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100410 next = stage2_pud_addr_end(kvm, addr, end);
411 if (!stage2_pud_none(kvm, *pud)) {
412 if (stage2_pud_huge(kvm, *pud))
Marc Zyngier363ef892014-12-19 16:48:06 +0000413 kvm_flush_dcache_pud(*pud);
414 else
Marc Zyngier9d218a12014-01-15 12:50:23 +0000415 stage2_flush_pmds(kvm, pud, addr, next);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000416 }
417 } while (pud++, addr = next, addr != end);
418}
419
420static void stage2_flush_memslot(struct kvm *kvm,
421 struct kvm_memory_slot *memslot)
422{
423 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
424 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
425 phys_addr_t next;
426 pgd_t *pgd;
427
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100428 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000429 do {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100430 next = stage2_pgd_addr_end(kvm, addr, end);
431 if (!stage2_pgd_none(kvm, *pgd))
Suzuki K Poulosed2db7772018-09-26 17:32:37 +0100432 stage2_flush_puds(kvm, pgd, addr, next);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000433 } while (pgd++, addr = next, addr != end);
434}
435
436/**
437 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
438 * @kvm: The struct kvm pointer
439 *
440 * Go through the stage 2 page tables and invalidate any cache lines
441 * backing memory already mapped to the VM.
442 */
Marc Zyngier3c1e7162014-12-19 16:05:31 +0000443static void stage2_flush_vm(struct kvm *kvm)
Marc Zyngier9d218a12014-01-15 12:50:23 +0000444{
445 struct kvm_memslots *slots;
446 struct kvm_memory_slot *memslot;
447 int idx;
448
449 idx = srcu_read_lock(&kvm->srcu);
450 spin_lock(&kvm->mmu_lock);
451
452 slots = kvm_memslots(kvm);
453 kvm_for_each_memslot(memslot, slots)
454 stage2_flush_memslot(kvm, memslot);
455
456 spin_unlock(&kvm->mmu_lock);
457 srcu_read_unlock(&kvm->srcu, idx);
458}
459
Suzuki K Poulose64f32492016-03-22 18:56:21 +0000460static void clear_hyp_pgd_entry(pgd_t *pgd)
461{
462 pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
463 pgd_clear(pgd);
464 pud_free(NULL, pud_table);
465 put_page(virt_to_page(pgd));
466}
467
468static void clear_hyp_pud_entry(pud_t *pud)
469{
470 pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
471 VM_BUG_ON(pud_huge(*pud));
472 pud_clear(pud);
473 pmd_free(NULL, pmd_table);
474 put_page(virt_to_page(pud));
475}
476
477static void clear_hyp_pmd_entry(pmd_t *pmd)
478{
479 pte_t *pte_table = pte_offset_kernel(pmd, 0);
480 VM_BUG_ON(pmd_thp_or_huge(*pmd));
481 pmd_clear(pmd);
482 pte_free_kernel(NULL, pte_table);
483 put_page(virt_to_page(pmd));
484}
485
486static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
487{
488 pte_t *pte, *start_pte;
489
490 start_pte = pte = pte_offset_kernel(pmd, addr);
491 do {
492 if (!pte_none(*pte)) {
493 kvm_set_pte(pte, __pte(0));
494 put_page(virt_to_page(pte));
495 }
496 } while (pte++, addr += PAGE_SIZE, addr != end);
497
498 if (hyp_pte_table_empty(start_pte))
499 clear_hyp_pmd_entry(pmd);
500}
501
502static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
503{
504 phys_addr_t next;
505 pmd_t *pmd, *start_pmd;
506
507 start_pmd = pmd = pmd_offset(pud, addr);
508 do {
509 next = pmd_addr_end(addr, end);
510 /* Hyp doesn't use huge pmds */
511 if (!pmd_none(*pmd))
512 unmap_hyp_ptes(pmd, addr, next);
513 } while (pmd++, addr = next, addr != end);
514
515 if (hyp_pmd_table_empty(start_pmd))
516 clear_hyp_pud_entry(pud);
517}
518
519static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
520{
521 phys_addr_t next;
522 pud_t *pud, *start_pud;
523
524 start_pud = pud = pud_offset(pgd, addr);
525 do {
526 next = pud_addr_end(addr, end);
527 /* Hyp doesn't use huge puds */
528 if (!pud_none(*pud))
529 unmap_hyp_pmds(pud, addr, next);
530 } while (pud++, addr = next, addr != end);
531
532 if (hyp_pud_table_empty(start_pud))
533 clear_hyp_pgd_entry(pgd);
534}
535
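/*
 * The extended-idmap pgd can have a different number of entries than the
 * kernel's PTRS_PER_PGD, so the index is computed against the ptrs_per_pgd
 * value supplied by the caller (PTRS_PER_PGD or __kvm_idmap_ptrs_per_pgd()).
 */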
Marc Zyngier3ddd4552018-03-14 15:17:33 +0000536static unsigned int kvm_pgd_index(unsigned long addr, unsigned int ptrs_per_pgd)
537{
538 return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
539}
540
541static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd,
542 phys_addr_t start, u64 size)
Suzuki K Poulose64f32492016-03-22 18:56:21 +0000543{
544 pgd_t *pgd;
545 phys_addr_t addr = start, end = start + size;
546 phys_addr_t next;
547
548 /*
549 * We don't unmap anything from HYP, except at the hyp tear down.
550 * Hence, we don't have to invalidate the TLBs here.
551 */
Marc Zyngier3ddd4552018-03-14 15:17:33 +0000552 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
Suzuki K Poulose64f32492016-03-22 18:56:21 +0000553 do {
554 next = pgd_addr_end(addr, end);
555 if (!pgd_none(*pgd))
556 unmap_hyp_puds(pgd, addr, next);
557 } while (pgd++, addr = next, addr != end);
558}
559
Marc Zyngier3ddd4552018-03-14 15:17:33 +0000560static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
561{
562 __unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size);
563}
564
565static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)
566{
567 __unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size);
568}
569
Marc Zyngier000d3992013-03-05 02:43:17 +0000570/**
Marc Zyngier4f728272013-04-12 19:12:05 +0100571 * free_hyp_pgds - free Hyp-mode page tables
Marc Zyngier000d3992013-03-05 02:43:17 +0000572 *
Marc Zyngier5a677ce2013-04-12 19:12:06 +0100573 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
574 * therefore contains either mappings in the kernel memory area (above
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000575 * PAGE_OFFSET), or device mappings in the idmap range.
Marc Zyngier5a677ce2013-04-12 19:12:06 +0100576 *
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000577 * boot_hyp_pgd should only map the idmap range, and is only used in
578 * the extended idmap case.
Marc Zyngier000d3992013-03-05 02:43:17 +0000579 */
Marc Zyngier4f728272013-04-12 19:12:05 +0100580void free_hyp_pgds(void)
Marc Zyngier000d3992013-03-05 02:43:17 +0000581{
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000582 pgd_t *id_pgd;
583
Marc Zyngierd157f4a2013-04-12 19:12:07 +0100584 mutex_lock(&kvm_hyp_pgd_mutex);
Marc Zyngier5a677ce2013-04-12 19:12:06 +0100585
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000586 id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd;
587
588 if (id_pgd) {
589 /* In case we never called hyp_mmu_init() */
590 if (!io_map_base)
591 io_map_base = hyp_idmap_start;
592 unmap_hyp_idmap_range(id_pgd, io_map_base,
593 hyp_idmap_start + PAGE_SIZE - io_map_base);
594 }
595
Marc Zyngier26781f9c2016-06-30 18:40:46 +0100596 if (boot_hyp_pgd) {
Marc Zyngier26781f9c2016-06-30 18:40:46 +0100597 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
598 boot_hyp_pgd = NULL;
599 }
600
Marc Zyngier4f728272013-04-12 19:12:05 +0100601 if (hyp_pgd) {
Marc Zyngier7839c672017-12-07 11:45:45 +0000602 unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
603 (uintptr_t)high_memory - PAGE_OFFSET);
Marc Zyngierd4cb9df52013-05-14 12:11:34 +0100604
Christoffer Dall38f791a2014-10-10 12:14:28 +0200605 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
Marc Zyngierd157f4a2013-04-12 19:12:07 +0100606 hyp_pgd = NULL;
Marc Zyngier4f728272013-04-12 19:12:05 +0100607 }
Ard Biesheuvele4c5a682015-03-19 16:42:28 +0000608 if (merged_hyp_pgd) {
609 clear_page(merged_hyp_pgd);
610 free_page((unsigned long)merged_hyp_pgd);
611 merged_hyp_pgd = NULL;
612 }
Marc Zyngier4f728272013-04-12 19:12:05 +0100613
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500614 mutex_unlock(&kvm_hyp_pgd_mutex);
615}
616
617static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
Marc Zyngier6060df82013-04-12 19:12:01 +0100618 unsigned long end, unsigned long pfn,
619 pgprot_t prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500620{
621 pte_t *pte;
622 unsigned long addr;
623
Marc Zyngier3562c762013-04-12 19:12:02 +0100624 addr = start;
625 do {
Marc Zyngier6060df82013-04-12 19:12:01 +0100626 pte = pte_offset_kernel(pmd, addr);
Punit Agrawalf8df7332018-12-11 17:10:36 +0000627 kvm_set_pte(pte, kvm_pfn_pte(pfn, prot));
Marc Zyngier4f728272013-04-12 19:12:05 +0100628 get_page(virt_to_page(pte));
Marc Zyngier6060df82013-04-12 19:12:01 +0100629 pfn++;
Marc Zyngier3562c762013-04-12 19:12:02 +0100630 } while (addr += PAGE_SIZE, addr != end);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500631}
632
633static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
Marc Zyngier6060df82013-04-12 19:12:01 +0100634 unsigned long end, unsigned long pfn,
635 pgprot_t prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500636{
637 pmd_t *pmd;
638 pte_t *pte;
639 unsigned long addr, next;
640
Marc Zyngier3562c762013-04-12 19:12:02 +0100641 addr = start;
642 do {
Marc Zyngier6060df82013-04-12 19:12:01 +0100643 pmd = pmd_offset(pud, addr);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500644
645 BUG_ON(pmd_sect(*pmd));
646
647 if (pmd_none(*pmd)) {
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -0800648 pte = pte_alloc_one_kernel(NULL);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500649 if (!pte) {
650 kvm_err("Cannot allocate Hyp pte\n");
651 return -ENOMEM;
652 }
Marc Zyngier0db9dd82018-06-27 15:51:05 +0100653 kvm_pmd_populate(pmd, pte);
Marc Zyngier4f728272013-04-12 19:12:05 +0100654 get_page(virt_to_page(pmd));
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500655 }
656
657 next = pmd_addr_end(addr, end);
658
Marc Zyngier6060df82013-04-12 19:12:01 +0100659 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
660 pfn += (next - addr) >> PAGE_SHIFT;
Marc Zyngier3562c762013-04-12 19:12:02 +0100661 } while (addr = next, addr != end);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500662
663 return 0;
664}
665
Christoffer Dall38f791a2014-10-10 12:14:28 +0200666static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
667 unsigned long end, unsigned long pfn,
668 pgprot_t prot)
669{
670 pud_t *pud;
671 pmd_t *pmd;
672 unsigned long addr, next;
673 int ret;
674
675 addr = start;
676 do {
677 pud = pud_offset(pgd, addr);
678
679 if (pud_none_or_clear_bad(pud)) {
680 pmd = pmd_alloc_one(NULL, addr);
681 if (!pmd) {
682 kvm_err("Cannot allocate Hyp pmd\n");
683 return -ENOMEM;
684 }
Marc Zyngier0db9dd82018-06-27 15:51:05 +0100685 kvm_pud_populate(pud, pmd);
Christoffer Dall38f791a2014-10-10 12:14:28 +0200686 get_page(virt_to_page(pud));
Christoffer Dall38f791a2014-10-10 12:14:28 +0200687 }
688
689 next = pud_addr_end(addr, end);
690 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
691 if (ret)
692 return ret;
693 pfn += (next - addr) >> PAGE_SHIFT;
694 } while (addr = next, addr != end);
695
696 return 0;
697}
698
Kristina Martsenko98732d12018-01-15 15:23:49 +0000699static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
Marc Zyngier6060df82013-04-12 19:12:01 +0100700 unsigned long start, unsigned long end,
701 unsigned long pfn, pgprot_t prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500702{
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500703 pgd_t *pgd;
704 pud_t *pud;
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500705 unsigned long addr, next;
706 int err = 0;
707
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500708 mutex_lock(&kvm_hyp_pgd_mutex);
Marc Zyngier3562c762013-04-12 19:12:02 +0100709 addr = start & PAGE_MASK;
710 end = PAGE_ALIGN(end);
711 do {
Marc Zyngier3ddd4552018-03-14 15:17:33 +0000712 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500713
Christoffer Dall38f791a2014-10-10 12:14:28 +0200714 if (pgd_none(*pgd)) {
715 pud = pud_alloc_one(NULL, addr);
716 if (!pud) {
717 kvm_err("Cannot allocate Hyp pud\n");
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500718 err = -ENOMEM;
719 goto out;
720 }
Marc Zyngier0db9dd82018-06-27 15:51:05 +0100721 kvm_pgd_populate(pgd, pud);
Christoffer Dall38f791a2014-10-10 12:14:28 +0200722 get_page(virt_to_page(pgd));
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500723 }
724
725 next = pgd_addr_end(addr, end);
Christoffer Dall38f791a2014-10-10 12:14:28 +0200726 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500727 if (err)
728 goto out;
Marc Zyngier6060df82013-04-12 19:12:01 +0100729 pfn += (next - addr) >> PAGE_SHIFT;
Marc Zyngier3562c762013-04-12 19:12:02 +0100730 } while (addr = next, addr != end);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500731out:
732 mutex_unlock(&kvm_hyp_pgd_mutex);
733 return err;
734}
735
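/*
 * Kernel addresses handed to create_hyp_mappings() may sit in the vmalloc
 * area, where virt_to_phys() is not usable; those are resolved one page at
 * a time via vmalloc_to_page().
 */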
Christoffer Dall40c27292013-11-15 13:14:12 -0800736static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
737{
738 if (!is_vmalloc_addr(kaddr)) {
739 BUG_ON(!virt_addr_valid(kaddr));
740 return __pa(kaddr);
741 } else {
742 return page_to_phys(vmalloc_to_page(kaddr)) +
743 offset_in_page(kaddr);
744 }
745}
746
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500747/**
Marc Zyngier06e8c3b2012-10-28 01:09:14 +0100748 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500749 * @from: The virtual kernel start address of the range
750 * @to: The virtual kernel end address of the range (exclusive)
Marc Zyngierc8dddec2016-06-13 15:00:45 +0100751 * @prot: The protection to be applied to this range
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500752 *
Marc Zyngier06e8c3b2012-10-28 01:09:14 +0100753 * The same virtual address as the kernel virtual address is also used
754 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
755 * physical pages.
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500756 */
Marc Zyngierc8dddec2016-06-13 15:00:45 +0100757int create_hyp_mappings(void *from, void *to, pgprot_t prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500758{
Christoffer Dall40c27292013-11-15 13:14:12 -0800759 phys_addr_t phys_addr;
760 unsigned long virt_addr;
Marc Zyngier6c41a412016-06-30 18:40:51 +0100761 unsigned long start = kern_hyp_va((unsigned long)from);
762 unsigned long end = kern_hyp_va((unsigned long)to);
Marc Zyngier6060df82013-04-12 19:12:01 +0100763
Marc Zyngier1e947ba2015-01-29 11:59:54 +0000764 if (is_kernel_in_hyp_mode())
765 return 0;
766
Christoffer Dall40c27292013-11-15 13:14:12 -0800767 start = start & PAGE_MASK;
768 end = PAGE_ALIGN(end);
Marc Zyngier6060df82013-04-12 19:12:01 +0100769
Christoffer Dall40c27292013-11-15 13:14:12 -0800770 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
771 int err;
772
773 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
Kristina Martsenko98732d12018-01-15 15:23:49 +0000774 err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
775 virt_addr, virt_addr + PAGE_SIZE,
Christoffer Dall40c27292013-11-15 13:14:12 -0800776 __phys_to_pfn(phys_addr),
Marc Zyngierc8dddec2016-06-13 15:00:45 +0100777 prot);
Christoffer Dall40c27292013-11-15 13:14:12 -0800778 if (err)
779 return err;
780 }
781
782 return 0;
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500783}
784
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000785static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
786 unsigned long *haddr, pgprot_t prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500787{
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000788 pgd_t *pgd = hyp_pgd;
789 unsigned long base;
790 int ret = 0;
Marc Zyngier6060df82013-04-12 19:12:01 +0100791
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000792 mutex_lock(&kvm_hyp_pgd_mutex);
Marc Zyngier6060df82013-04-12 19:12:01 +0100793
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000794 /*
 795 * This assumes that we have enough space below the idmap
 796 * page to allocate our VAs. If not, the check below will
 797 * kick in. A potential alternative would be to detect that
798 * overflow and switch to an allocation above the idmap.
799 *
800 * The allocated size is always a multiple of PAGE_SIZE.
801 */
802 size = PAGE_ALIGN(size + offset_in_page(phys_addr));
803 base = io_map_base - size;
Marc Zyngier1bb32a42017-12-04 16:43:23 +0000804
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000805 /*
806 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
807 * allocating the new area, as it would indicate we've
808 * overflowed the idmap/IO address range.
809 */
810 if ((base ^ io_map_base) & BIT(VA_BITS - 1))
811 ret = -ENOMEM;
812 else
813 io_map_base = base;
814
815 mutex_unlock(&kvm_hyp_pgd_mutex);
816
817 if (ret)
818 goto out;
819
820 if (__kvm_cpu_uses_extended_idmap())
821 pgd = boot_hyp_pgd;
822
823 ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
824 base, base + size,
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000825 __phys_to_pfn(phys_addr), prot);
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000826 if (ret)
827 goto out;
828
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000829 *haddr = base + offset_in_page(phys_addr);
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000830
831out:
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000832 return ret;
833}
834
835/**
836 * create_hyp_io_mappings - Map IO into both kernel and HYP
837 * @phys_addr: The physical start address which gets mapped
838 * @size: Size of the region being mapped
839 * @kaddr: Kernel VA for this mapping
840 * @haddr: HYP VA for this mapping
841 */
842int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
843 void __iomem **kaddr,
844 void __iomem **haddr)
845{
846 unsigned long addr;
847 int ret;
848
849 *kaddr = ioremap(phys_addr, size);
850 if (!*kaddr)
851 return -ENOMEM;
852
853 if (is_kernel_in_hyp_mode()) {
854 *haddr = *kaddr;
855 return 0;
856 }
857
858 ret = __create_hyp_private_mapping(phys_addr, size,
859 &addr, PAGE_HYP_DEVICE);
Marc Zyngier1bb32a42017-12-04 16:43:23 +0000860 if (ret) {
861 iounmap(*kaddr);
862 *kaddr = NULL;
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000863 *haddr = NULL;
Marc Zyngier1bb32a42017-12-04 16:43:23 +0000864 return ret;
865 }
866
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000867 *haddr = (void __iomem *)addr;
868 return 0;
869}
870
871/**
872 * create_hyp_exec_mappings - Map an executable range into HYP
873 * @phys_addr: The physical start address which gets mapped
874 * @size: Size of the region being mapped
875 * @haddr: HYP VA for this mapping
876 */
877int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
878 void **haddr)
879{
880 unsigned long addr;
881 int ret;
882
883 BUG_ON(is_kernel_in_hyp_mode());
884
885 ret = __create_hyp_private_mapping(phys_addr, size,
886 &addr, PAGE_HYP_EXEC);
887 if (ret) {
888 *haddr = NULL;
889 return ret;
890 }
891
892 *haddr = (void *)addr;
Marc Zyngier1bb32a42017-12-04 16:43:23 +0000893 return 0;
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500894}
895
Christoffer Dalld5d81842013-01-20 18:28:07 -0500896/**
897 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
898 * @kvm: The KVM struct pointer for the VM.
899 *
Zenghui Yu8324c3d2019-03-25 08:02:05 +0000900 * Allocates only the stage-2 HW PGD level table(s) of size defined by
901 * stage2_pgd_size(kvm).
Christoffer Dalld5d81842013-01-20 18:28:07 -0500902 *
903 * Note we don't need locking here as this is only called when the VM is
904 * created, which can only be done once.
905 */
906int kvm_alloc_stage2_pgd(struct kvm *kvm)
907{
Christoffer Dalle329fb72018-12-11 15:26:31 +0100908 phys_addr_t pgd_phys;
Christoffer Dalld5d81842013-01-20 18:28:07 -0500909 pgd_t *pgd;
910
911 if (kvm->arch.pgd != NULL) {
912 kvm_err("kvm_arch already initialized?\n");
913 return -EINVAL;
914 }
915
Suzuki K Poulose9163ee232016-03-22 17:01:21 +0000916 /* Allocate the HW PGD, making sure that each page gets its own refcount */
Suzuki K Poulosee55cac52018-09-26 17:32:44 +0100917 pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
Suzuki K Poulose9163ee232016-03-22 17:01:21 +0000918 if (!pgd)
Marc Zyngiera9873702015-03-10 19:06:59 +0000919 return -ENOMEM;
920
Christoffer Dalle329fb72018-12-11 15:26:31 +0100921 pgd_phys = virt_to_phys(pgd);
922 if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
923 return -EINVAL;
924
Christoffer Dalld5d81842013-01-20 18:28:07 -0500925 kvm->arch.pgd = pgd;
Christoffer Dalle329fb72018-12-11 15:26:31 +0100926 kvm->arch.pgd_phys = pgd_phys;
Christoffer Dalld5d81842013-01-20 18:28:07 -0500927 return 0;
928}
929
Christoffer Dall957db102014-11-27 10:35:03 +0100930static void stage2_unmap_memslot(struct kvm *kvm,
931 struct kvm_memory_slot *memslot)
932{
933 hva_t hva = memslot->userspace_addr;
934 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
935 phys_addr_t size = PAGE_SIZE * memslot->npages;
936 hva_t reg_end = hva + size;
937
938 /*
939 * A memory region could potentially cover multiple VMAs, and any holes
940 * between them, so iterate over all of them to find out if we should
941 * unmap any of them.
942 *
943 * +--------------------------------------------+
944 * +---------------+----------------+ +----------------+
945 * | : VMA 1 | VMA 2 | | VMA 3 : |
946 * +---------------+----------------+ +----------------+
947 * | memory region |
948 * +--------------------------------------------+
949 */
950 do {
951 struct vm_area_struct *vma = find_vma(current->mm, hva);
952 hva_t vm_start, vm_end;
953
954 if (!vma || vma->vm_start >= reg_end)
955 break;
956
957 /*
958 * Take the intersection of this VMA with the memory region
959 */
960 vm_start = max(hva, vma->vm_start);
961 vm_end = min(reg_end, vma->vm_end);
962
963 if (!(vma->vm_flags & VM_PFNMAP)) {
964 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
965 unmap_stage2_range(kvm, gpa, vm_end - vm_start);
966 }
967 hva = vm_end;
968 } while (hva < reg_end);
969}
970
971/**
972 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
973 * @kvm: The struct kvm pointer
974 *
 975 * Go through the memregions and unmap any regular RAM
976 * backing memory already mapped to the VM.
977 */
978void stage2_unmap_vm(struct kvm *kvm)
979{
980 struct kvm_memslots *slots;
981 struct kvm_memory_slot *memslot;
982 int idx;
983
984 idx = srcu_read_lock(&kvm->srcu);
Marc Zyngier90f6e152017-03-16 18:20:49 +0000985 down_read(&current->mm->mmap_sem);
Christoffer Dall957db102014-11-27 10:35:03 +0100986 spin_lock(&kvm->mmu_lock);
987
988 slots = kvm_memslots(kvm);
989 kvm_for_each_memslot(memslot, slots)
990 stage2_unmap_memslot(kvm, memslot);
991
992 spin_unlock(&kvm->mmu_lock);
Marc Zyngier90f6e152017-03-16 18:20:49 +0000993 up_read(&current->mm->mmap_sem);
Christoffer Dall957db102014-11-27 10:35:03 +0100994 srcu_read_unlock(&kvm->srcu, idx);
995}
996
Christoffer Dalld5d81842013-01-20 18:28:07 -0500997/**
998 * kvm_free_stage2_pgd - free all stage-2 tables
999 * @kvm: The KVM struct pointer for the VM.
1000 *
1001 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
1002 * underlying level-2 and level-3 tables before freeing the actual level-1 table
1003 * and setting the struct pointer to NULL.
Christoffer Dalld5d81842013-01-20 18:28:07 -05001004 */
1005void kvm_free_stage2_pgd(struct kvm *kvm)
1006{
Suzuki K Poulose6c0d7062017-05-03 15:17:51 +01001007 void *pgd = NULL;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001008
Suzuki K Poulose8b3405e2017-04-03 15:12:43 +01001009 spin_lock(&kvm->mmu_lock);
Suzuki K Poulose6c0d7062017-05-03 15:17:51 +01001010 if (kvm->arch.pgd) {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001011 unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
Suzuki K Poulose2952a602017-05-16 10:34:54 +01001012 pgd = READ_ONCE(kvm->arch.pgd);
Suzuki K Poulose6c0d7062017-05-03 15:17:51 +01001013 kvm->arch.pgd = NULL;
Christoffer Dalle329fb72018-12-11 15:26:31 +01001014 kvm->arch.pgd_phys = 0;
Suzuki K Poulose6c0d7062017-05-03 15:17:51 +01001015 }
Suzuki K Poulose8b3405e2017-04-03 15:12:43 +01001016 spin_unlock(&kvm->mmu_lock);
1017
Suzuki K Poulose9163ee232016-03-22 17:01:21 +00001018 /* Free the HW pgd, one page at a time */
Suzuki K Poulose6c0d7062017-05-03 15:17:51 +01001019 if (pgd)
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001020 free_pages_exact(pgd, stage2_pgd_size(kvm));
Christoffer Dalld5d81842013-01-20 18:28:07 -05001021}
1022
Christoffer Dall38f791a2014-10-10 12:14:28 +02001023static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1024 phys_addr_t addr)
1025{
1026 pgd_t *pgd;
1027 pud_t *pud;
1028
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001029 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
1030 if (stage2_pgd_none(kvm, *pgd)) {
Christoffer Dall38f791a2014-10-10 12:14:28 +02001031 if (!cache)
1032 return NULL;
1033 pud = mmu_memory_cache_alloc(cache);
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001034 stage2_pgd_populate(kvm, pgd, pud);
Christoffer Dall38f791a2014-10-10 12:14:28 +02001035 get_page(virt_to_page(pgd));
1036 }
1037
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001038 return stage2_pud_offset(kvm, pgd, addr);
Christoffer Dall38f791a2014-10-10 12:14:28 +02001039}
1040
Christoffer Dallad361f02012-11-01 17:14:45 +01001041static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1042 phys_addr_t addr)
Christoffer Dalld5d81842013-01-20 18:28:07 -05001043{
Christoffer Dalld5d81842013-01-20 18:28:07 -05001044 pud_t *pud;
1045 pmd_t *pmd;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001046
Christoffer Dall38f791a2014-10-10 12:14:28 +02001047 pud = stage2_get_pud(kvm, cache, addr);
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001048 if (!pud || stage2_pud_huge(kvm, *pud))
Marc Zyngierd6dbdd32017-06-05 19:17:18 +01001049 return NULL;
1050
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001051 if (stage2_pud_none(kvm, *pud)) {
Christoffer Dalld5d81842013-01-20 18:28:07 -05001052 if (!cache)
Christoffer Dallad361f02012-11-01 17:14:45 +01001053 return NULL;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001054 pmd = mmu_memory_cache_alloc(cache);
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001055 stage2_pud_populate(kvm, pud, pmd);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001056 get_page(virt_to_page(pud));
Marc Zyngierc62ee2b2012-10-15 11:27:37 +01001057 }
1058
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001059 return stage2_pmd_offset(kvm, pud, addr);
Christoffer Dallad361f02012-11-01 17:14:45 +01001060}
Christoffer Dalld5d81842013-01-20 18:28:07 -05001061
Christoffer Dallad361f02012-11-01 17:14:45 +01001062static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
1063 *cache, phys_addr_t addr, const pmd_t *new_pmd)
1064{
1065 pmd_t *pmd, old_pmd;
1066
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001067retry:
Christoffer Dallad361f02012-11-01 17:14:45 +01001068 pmd = stage2_get_pmd(kvm, cache, addr);
1069 VM_BUG_ON(!pmd);
1070
Christoffer Dallad361f02012-11-01 17:14:45 +01001071 old_pmd = *pmd;
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001072 /*
 1073 * Multiple vcpus faulting on the same PMD entry can
1074 * lead to them sequentially updating the PMD with the
1075 * same value. Following the break-before-make
1076 * (pmd_clear() followed by tlb_flush()) process can
1077 * hinder forward progress due to refaults generated
1078 * on missing translations.
1079 *
1080 * Skip updating the page table if the entry is
1081 * unchanged.
1082 */
1083 if (pmd_val(old_pmd) == pmd_val(*new_pmd))
1084 return 0;
1085
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001086 if (pmd_present(old_pmd)) {
Punit Agrawal86658b82018-08-13 11:43:50 +01001087 /*
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001088 * If we already have PTE level mapping for this block,
1089 * we must unmap it to avoid inconsistent TLB state and
1090 * leaking the table page. We could end up in this situation
1091 * if the memory slot was marked for dirty logging and was
1092 * reverted, leaving PTE level mappings for the pages accessed
1093 * during the period. So, unmap the PTE level mapping for this
1094 * block and retry, as we could have released the upper level
1095 * table in the process.
Punit Agrawal86658b82018-08-13 11:43:50 +01001096 *
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001097 * Normal THP split/merge follows mmu_notifier callbacks and do
1098 * get handled accordingly.
Punit Agrawal86658b82018-08-13 11:43:50 +01001099 */
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001100 if (!pmd_thp_or_huge(old_pmd)) {
1101 unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
1102 goto retry;
1103 }
Punit Agrawal86658b82018-08-13 11:43:50 +01001104 /*
1105 * Mapping in huge pages should only happen through a
1106 * fault. If a page is merged into a transparent huge
1107 * page, the individual subpages of that huge page
1108 * should be unmapped through MMU notifiers before we
1109 * get here.
1110 *
1111 * Merging of CompoundPages is not supported; they
 1112 * should be split first, unmapped, merged,
1113 * and mapped back in on-demand.
1114 */
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001115 WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001116 pmd_clear(pmd);
Christoffer Dallad361f02012-11-01 17:14:45 +01001117 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001118 } else {
Christoffer Dallad361f02012-11-01 17:14:45 +01001119 get_page(virt_to_page(pmd));
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001120 }
1121
1122 kvm_set_pmd(pmd, *new_pmd);
Christoffer Dallad361f02012-11-01 17:14:45 +01001123 return 0;
1124}
1125
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001126static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1127 phys_addr_t addr, const pud_t *new_pudp)
1128{
1129 pud_t *pudp, old_pud;
1130
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001131retry:
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001132 pudp = stage2_get_pud(kvm, cache, addr);
1133 VM_BUG_ON(!pudp);
1134
1135 old_pud = *pudp;
1136
1137 /*
 1138 * A large number of vcpus faulting on the same stage 2 entry
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001139 * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
1140 * Skip updating the page tables if there is no change.
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001141 */
1142 if (pud_val(old_pud) == pud_val(*new_pudp))
1143 return 0;
1144
1145 if (stage2_pud_present(kvm, old_pud)) {
Suzuki K Poulose3c3736c2019-03-20 14:57:19 +00001146 /*
1147 * If we already have table level mapping for this block, unmap
1148 * the range for this block and retry.
1149 */
1150 if (!stage2_pud_huge(kvm, old_pud)) {
1151 unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
1152 goto retry;
1153 }
1154
1155 WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001156 stage2_pud_clear(kvm, pudp);
1157 kvm_tlb_flush_vmid_ipa(kvm, addr);
1158 } else {
1159 get_page(virt_to_page(pudp));
1160 }
1161
1162 kvm_set_pud(pudp, *new_pudp);
1163 return 0;
1164}
1165
Punit Agrawal86d1c552018-12-11 17:10:38 +00001166/*
1167 * stage2_get_leaf_entry - walk the stage2 VM page tables and return
1168 * true if a valid and present leaf-entry is found. A pointer to the
1169 * leaf-entry is returned in the appropriate level variable - pudpp,
1170 * pmdpp, ptepp.
1171 */
1172static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
1173 pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp)
Marc Zyngier7a3796d2017-10-23 17:11:21 +01001174{
Punit Agrawal86d1c552018-12-11 17:10:38 +00001175 pud_t *pudp;
Marc Zyngier7a3796d2017-10-23 17:11:21 +01001176 pmd_t *pmdp;
1177 pte_t *ptep;
1178
Punit Agrawal86d1c552018-12-11 17:10:38 +00001179 *pudpp = NULL;
1180 *pmdpp = NULL;
1181 *ptepp = NULL;
1182
1183 pudp = stage2_get_pud(kvm, NULL, addr);
1184 if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp))
1185 return false;
1186
1187 if (stage2_pud_huge(kvm, *pudp)) {
1188 *pudpp = pudp;
1189 return true;
1190 }
1191
1192 pmdp = stage2_pmd_offset(kvm, pudp, addr);
Marc Zyngier7a3796d2017-10-23 17:11:21 +01001193 if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
1194 return false;
1195
Punit Agrawal86d1c552018-12-11 17:10:38 +00001196 if (pmd_thp_or_huge(*pmdp)) {
1197 *pmdpp = pmdp;
1198 return true;
1199 }
Marc Zyngier7a3796d2017-10-23 17:11:21 +01001200
1201 ptep = pte_offset_kernel(pmdp, addr);
1202 if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
1203 return false;
1204
Punit Agrawal86d1c552018-12-11 17:10:38 +00001205 *ptepp = ptep;
1206 return true;
1207}
1208
1209static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
1210{
1211 pud_t *pudp;
1212 pmd_t *pmdp;
1213 pte_t *ptep;
1214 bool found;
1215
1216 found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep);
1217 if (!found)
1218 return false;
1219
1220 if (pudp)
1221 return kvm_s2pud_exec(pudp);
1222 else if (pmdp)
1223 return kvm_s2pmd_exec(pmdp);
1224 else
1225 return kvm_s2pte_exec(ptep);
Marc Zyngier7a3796d2017-10-23 17:11:21 +01001226}
1227
Christoffer Dallad361f02012-11-01 17:14:45 +01001228static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
Mario Smarduch15a49a42015-01-15 15:58:58 -08001229 phys_addr_t addr, const pte_t *new_pte,
1230 unsigned long flags)
Christoffer Dallad361f02012-11-01 17:14:45 +01001231{
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001232 pud_t *pud;
Christoffer Dallad361f02012-11-01 17:14:45 +01001233 pmd_t *pmd;
1234 pte_t *pte, old_pte;
Mario Smarduch15a49a42015-01-15 15:58:58 -08001235 bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
1236 bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
1237
1238 VM_BUG_ON(logging_active && !cache);
Christoffer Dallad361f02012-11-01 17:14:45 +01001239
Christoffer Dall38f791a2014-10-10 12:14:28 +02001240 /* Create stage-2 page table mapping - Levels 0 and 1 */
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001241 pud = stage2_get_pud(kvm, cache, addr);
1242 if (!pud) {
1243 /*
1244 * Ignore calls from kvm_set_spte_hva for unallocated
1245 * address ranges.
1246 */
1247 return 0;
1248 }
1249
1250 /*
1251 * While dirty page logging - dissolve huge PUD, then continue
1252 * on to allocate page.
1253 */
1254 if (logging_active)
1255 stage2_dissolve_pud(kvm, addr, pud);
1256
1257 if (stage2_pud_none(kvm, *pud)) {
1258 if (!cache)
1259 return 0; /* ignore calls from kvm_set_spte_hva */
1260 pmd = mmu_memory_cache_alloc(cache);
1261 stage2_pud_populate(kvm, pud, pmd);
1262 get_page(virt_to_page(pud));
1263 }
1264
1265 pmd = stage2_pmd_offset(kvm, pud, addr);
Christoffer Dallad361f02012-11-01 17:14:45 +01001266 if (!pmd) {
1267 /*
1268 * Ignore calls from kvm_set_spte_hva for unallocated
1269 * address ranges.
1270 */
1271 return 0;
1272 }
1273
Mario Smarduch15a49a42015-01-15 15:58:58 -08001274 /*
1275 * While dirty page logging - dissolve huge PMD, then continue on to
1276 * allocate page.
1277 */
1278 if (logging_active)
1279 stage2_dissolve_pmd(kvm, addr, pmd);
1280
Christoffer Dallad361f02012-11-01 17:14:45 +01001281 /* Create stage-2 page mappings - Level 2 */
Christoffer Dalld5d81842013-01-20 18:28:07 -05001282 if (pmd_none(*pmd)) {
1283 if (!cache)
1284 return 0; /* ignore calls from kvm_set_spte_hva */
1285 pte = mmu_memory_cache_alloc(cache);
Marc Zyngier0db9dd82018-06-27 15:51:05 +01001286 kvm_pmd_populate(pmd, pte);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001287 get_page(virt_to_page(pmd));
Marc Zyngierc62ee2b2012-10-15 11:27:37 +01001288 }
1289
1290 pte = pte_offset_kernel(pmd, addr);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001291
1292 if (iomap && pte_present(*pte))
1293 return -EFAULT;
1294
1295 /* Create 2nd stage page table mapping - Level 3 */
1296 old_pte = *pte;
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001297 if (pte_present(old_pte)) {
Punit Agrawal976d34e2018-08-13 11:43:51 +01001298 /* Skip page table update if there is no change */
1299 if (pte_val(old_pte) == pte_val(*new_pte))
1300 return 0;
1301
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001302 kvm_set_pte(pte, __pte(0));
Marc Zyngier48762762013-01-28 15:27:00 +00001303 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001304 } else {
Christoffer Dalld5d81842013-01-20 18:28:07 -05001305 get_page(virt_to_page(pte));
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001306 }
Christoffer Dalld5d81842013-01-20 18:28:07 -05001307
Marc Zyngierd4b9e072016-04-28 16:16:31 +01001308 kvm_set_pte(pte, *new_pte);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001309 return 0;
1310}
1311
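/*
 * When the architecture does not provide __ptep_test_and_clear_young(),
 * fall back to a plain (non-atomic) clear of the young/access flag.
 */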
Catalin Marinas06485052016-04-13 17:57:37 +01001312#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1313static int stage2_ptep_test_and_clear_young(pte_t *pte)
1314{
1315 if (pte_young(*pte)) {
1316 *pte = pte_mkold(*pte);
1317 return 1;
1318 }
1319 return 0;
1320}
1321#else
1322static int stage2_ptep_test_and_clear_young(pte_t *pte)
1323{
1324 return __ptep_test_and_clear_young(pte);
1325}
1326#endif
1327
1328static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
1329{
1330 return stage2_ptep_test_and_clear_young((pte_t *)pmd);
1331}
1332
Punit Agrawal35a63962018-12-11 17:10:40 +00001333static int stage2_pudp_test_and_clear_young(pud_t *pud)
1334{
1335 return stage2_ptep_test_and_clear_young((pte_t *)pud);
1336}
1337
Christoffer Dalld5d81842013-01-20 18:28:07 -05001338/**
1339 * kvm_phys_addr_ioremap - map a device range to guest IPA
1340 *
1341 * @kvm: The KVM pointer
1342 * @guest_ipa: The IPA at which to insert the mapping
1343 * @pa: The physical address of the device
1344 * @size: The size of the mapping
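 * @writable: Whether or not to create a writable mapping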
1345 */
1346int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
Ard Biesheuvelc40f2f82014-09-17 14:56:18 -07001347 phys_addr_t pa, unsigned long size, bool writable)
Christoffer Dalld5d81842013-01-20 18:28:07 -05001348{
1349 phys_addr_t addr, end;
1350 int ret = 0;
1351 unsigned long pfn;
1352 struct kvm_mmu_memory_cache cache = { 0, };
1353
1354 end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
1355 pfn = __phys_to_pfn(pa);
1356
1357 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
Punit Agrawalf8df7332018-12-11 17:10:36 +00001358 pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001359
Ard Biesheuvelc40f2f82014-09-17 14:56:18 -07001360 if (writable)
Catalin Marinas06485052016-04-13 17:57:37 +01001361 pte = kvm_s2pte_mkwrite(pte);
Ard Biesheuvelc40f2f82014-09-17 14:56:18 -07001362
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001363 ret = mmu_topup_memory_cache(&cache,
1364 kvm_mmu_cache_min_pages(kvm),
1365 KVM_NR_MEM_OBJS);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001366 if (ret)
1367 goto out;
1368 spin_lock(&kvm->mmu_lock);
Mario Smarduch15a49a42015-01-15 15:58:58 -08001369 ret = stage2_set_pte(kvm, &cache, addr, &pte,
1370 KVM_S2PTE_FLAG_IS_IOMAP);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001371 spin_unlock(&kvm->mmu_lock);
1372 if (ret)
1373 goto out;
1374
1375 pfn++;
1376 }
1377
1378out:
1379 mmu_free_memory_cache(&cache);
1380 return ret;
1381}
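/*
 * Usage sketch (illustrative only, the addresses are made up): mapping an
 * 8K device region read/write into the guest would look roughly like
 *
 *	kvm_phys_addr_ioremap(kvm, 0x08010000, 0x2c02f000, SZ_8K, true);
 *
 * i.e. the range is mapped page by page with PAGE_S2_DEVICE attributes.
 */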
1382
Dan Williamsba049e92016-01-15 16:56:11 -08001383static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001384{
Dan Williamsba049e92016-01-15 16:56:11 -08001385 kvm_pfn_t pfn = *pfnp;
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001386 gfn_t gfn = *ipap >> PAGE_SHIFT;
Punit Agrawalfd2ef352018-10-01 16:54:35 +01001387 struct page *page = pfn_to_page(pfn);
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001388
Punit Agrawalfd2ef352018-10-01 16:54:35 +01001389 /*
Christoffer Dall69921952018-11-06 13:33:38 +01001390 * PageTransCompoundMap() returns true for THP and
Punit Agrawalfd2ef352018-10-01 16:54:35 +01001391 * hugetlbfs. Make sure the adjustment is done only for THP
1392 * pages.
1393 */
1394 if (!PageHuge(page) && PageTransCompoundMap(page)) {
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001395 unsigned long mask;
1396 /*
1397 * The address we faulted on is backed by a transparent huge
1398 * page. However, because we map the compound huge page and
1399 * not the individual tail page, we need to transfer the
1400 * refcount to the head page. We have to be careful that the
1401 * THP doesn't start to split while we are adjusting the
1402 * refcounts.
1403 *
1404 * We are sure this doesn't happen, because mmu_notifier_retry
1405 * was successful and we are holding the mmu_lock, so if this
1406 * THP is trying to split, it will be blocked in the mmu
1407 * notifier before touching any of the pages, specifically
1408 * before being able to call __split_huge_page_refcount().
1409 *
1410 * We can therefore safely transfer the refcount from PG_tail
1411 * to PG_head and switch the pfn from a tail page to the head
1412 * page accordingly.
1413 */
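		/*
		 * Worked example (illustrative, assuming 4K pages and
		 * PTRS_PER_PMD == 512): a fault on the 5th page of a THP
		 * gives pfn == head_pfn + 4, so pfn & mask == 4; *ipap is
		 * rounded down to the 2M boundary and pfn is rewound to
		 * head_pfn before the reference is moved to the head page.
		 */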
1414 mask = PTRS_PER_PMD - 1;
1415 VM_BUG_ON((gfn & mask) != (pfn & mask));
1416 if (pfn & mask) {
1417 *ipap &= PMD_MASK;
1418 kvm_release_pfn_clean(pfn);
1419 pfn &= ~mask;
1420 kvm_get_pfn(pfn);
1421 *pfnp = pfn;
1422 }
1423
1424 return true;
1425 }
1426
1427 return false;
1428}
1429
Mario Smarduchc6473552015-01-15 15:58:56 -08001430/**
1431 * stage2_wp_ptes - write protect PMD range
1432 * @pmd: pointer to pmd entry
1433 * @addr: range start address
1434 * @end: range end address
1435 */
1436static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
1437{
1438 pte_t *pte;
1439
1440 pte = pte_offset_kernel(pmd, addr);
1441 do {
1442 if (!pte_none(*pte)) {
1443 if (!kvm_s2pte_readonly(pte))
1444 kvm_set_s2pte_readonly(pte);
1445 }
1446 } while (pte++, addr += PAGE_SIZE, addr != end);
1447}
1448
1449/**
1450 * stage2_wp_pmds - write protect PUD range
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001451 * @kvm: kvm instance for the VM
Mario Smarduchc6473552015-01-15 15:58:56 -08001452 * @pud: pointer to pud entry
1453 * @addr: range start address
1454 * @end: range end address
1455 */
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001456static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
1457 phys_addr_t addr, phys_addr_t end)
Mario Smarduchc6473552015-01-15 15:58:56 -08001458{
1459 pmd_t *pmd;
1460 phys_addr_t next;
1461
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001462 pmd = stage2_pmd_offset(kvm, pud, addr);
Mario Smarduchc6473552015-01-15 15:58:56 -08001463
1464 do {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001465 next = stage2_pmd_addr_end(kvm, addr, end);
Mario Smarduchc6473552015-01-15 15:58:56 -08001466 if (!pmd_none(*pmd)) {
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +00001467 if (pmd_thp_or_huge(*pmd)) {
Mario Smarduchc6473552015-01-15 15:58:56 -08001468 if (!kvm_s2pmd_readonly(pmd))
1469 kvm_set_s2pmd_readonly(pmd);
1470 } else {
1471 stage2_wp_ptes(pmd, addr, next);
1472 }
1473 }
1474 } while (pmd++, addr = next, addr != end);
1475}
1476
1477/**
Zenghui Yu8324c3d2019-03-25 08:02:05 +00001478 * stage2_wp_puds - write protect PGD range
 * @kvm: kvm instance for the VM
1479 * @pgd: pointer to pgd entry
1480 * @addr: range start address
1481 * @end: range end address
1482 */
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001483static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
1484 phys_addr_t addr, phys_addr_t end)
Mario Smarduchc6473552015-01-15 15:58:56 -08001485{
1486 pud_t *pud;
1487 phys_addr_t next;
1488
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001489 pud = stage2_pud_offset(kvm, pgd, addr);
Mario Smarduchc6473552015-01-15 15:58:56 -08001490 do {
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001491 next = stage2_pud_addr_end(kvm, addr, end);
1492 if (!stage2_pud_none(kvm, *pud)) {
Punit Agrawal4ea5af52018-12-11 17:10:37 +00001493 if (stage2_pud_huge(kvm, *pud)) {
1494 if (!kvm_s2pud_readonly(pud))
1495 kvm_set_s2pud_readonly(pud);
1496 } else {
1497 stage2_wp_pmds(kvm, pud, addr, next);
1498 }
Mario Smarduchc6473552015-01-15 15:58:56 -08001499 }
1500 } while (pud++, addr = next, addr != end);
1501}
1502
1503/**
1504 * stage2_wp_range() - write protect stage2 memory region range
1505 * @kvm: The KVM pointer
1506 * @addr: Start address of range
1507 * @end: End address of range
1508 */
1509static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
1510{
1511 pgd_t *pgd;
1512 phys_addr_t next;
1513
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001514 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
Mario Smarduchc6473552015-01-15 15:58:56 -08001515 do {
1516 /*
1517 * Release kvm_mmu_lock periodically if the memory region is
1518 * large. Otherwise, we may see kernel panics with
Christoffer Dall227ea812015-01-23 10:49:31 +01001519 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
1520 * CONFIG_LOCKDEP. Additionally, holding the lock too long
Suzuki K Poulose0c428a6a2017-05-16 10:34:55 +01001521 * will also starve other vCPUs. We also have to make sure
1522 * that the page tables are not freed while we released
1523 * the lock.
Mario Smarduchc6473552015-01-15 15:58:56 -08001524 */
Suzuki K Poulose0c428a6a2017-05-16 10:34:55 +01001525 cond_resched_lock(&kvm->mmu_lock);
1526 if (!READ_ONCE(kvm->arch.pgd))
1527 break;
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001528 next = stage2_pgd_addr_end(kvm, addr, end);
1529 if (stage2_pgd_present(kvm, *pgd))
1530 stage2_wp_puds(kvm, pgd, addr, next);
Mario Smarduchc6473552015-01-15 15:58:56 -08001531 } while (pgd++, addr = next, addr != end);
1532}
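/*
 * The write protection walk above descends level by level:
 * stage2_wp_range() -> stage2_wp_puds() -> stage2_wp_pmds() ->
 * stage2_wp_ptes(). Huge entries (PUD/PMD) are made read-only in place,
 * everything else recurses down to the PTE level.
 */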
1533
1534/**
1535 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
1536 * @kvm: The KVM pointer
1537 * @slot: The memory slot to write protect
1538 *
1539 * Called to start logging dirty pages after the memory region's
1540 * KVM_MEM_LOG_DIRTY_PAGES flag is set. After this function returns, all
Punit Agrawal4ea5af52018-12-11 17:10:37 +00001541 * present PUD, PMD and PTE entries in the memory region are write protected.
Mario Smarduchc6473552015-01-15 15:58:56 -08001542 * Afterwards, the dirty page log can be read.
1543 *
1544 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
1545 * serializing operations for VM memory regions.
1546 */
1547void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1548{
Paolo Bonzini9f6b8022015-05-17 16:20:07 +02001549 struct kvm_memslots *slots = kvm_memslots(kvm);
1550 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
Mario Smarduchc6473552015-01-15 15:58:56 -08001551 phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
1552 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1553
1554 spin_lock(&kvm->mmu_lock);
1555 stage2_wp_range(kvm, start, end);
1556 spin_unlock(&kvm->mmu_lock);
1557 kvm_flush_remote_tlbs(kvm);
1558}
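/*
 * kvm_mmu_wp_memory_region() is called from kvm_arch_commit_memory_region()
 * below once KVM_MEM_LOG_DIRTY_PAGES is set on a slot, so every present
 * mapping in the slot starts out write protected when logging begins.
 */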
Mario Smarduch53c810c2015-01-15 15:58:57 -08001559
1560/**
Kai Huang3b0f1d02015-01-28 10:54:23 +08001561 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
Mario Smarduch53c810c2015-01-15 15:58:57 -08001562 * @kvm: The KVM pointer
1563 * @slot: The memory slot associated with mask
1564 * @gfn_offset: The gfn offset in memory slot
1565 * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
1566 * slot to be write protected
1567 *
1568 * Walks the bits set in mask and write protects the associated PTEs. Caller must
1569 * acquire kvm_mmu_lock.
1570 */
Kai Huang3b0f1d02015-01-28 10:54:23 +08001571static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
Mario Smarduch53c810c2015-01-15 15:58:57 -08001572 struct kvm_memory_slot *slot,
1573 gfn_t gfn_offset, unsigned long mask)
1574{
1575 phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1576 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
1577 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
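	/*
	 * Illustrative example: with gfn_offset == 0 and mask == 0xc,
	 * __ffs(mask) == 2 and __fls(mask) == 3, so only the third and
	 * fourth pages of the memslot are write protected below.
	 */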
1578
1579 stage2_wp_range(kvm, start, end);
1580}
Mario Smarduchc6473552015-01-15 15:58:56 -08001581
Kai Huang3b0f1d02015-01-28 10:54:23 +08001582/*
1583 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1584 * dirty pages.
1585 *
1586 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1587 * enable dirty logging for them.
1588 */
1589void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1590 struct kvm_memory_slot *slot,
1591 gfn_t gfn_offset, unsigned long mask)
1592{
1593 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1594}
1595
Marc Zyngier17ab9d52017-10-23 17:11:22 +01001596static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
Marc Zyngier0d3e4d42015-01-05 21:13:24 +00001597{
Marc Zyngier17ab9d52017-10-23 17:11:22 +01001598 __clean_dcache_guest_page(pfn, size);
Marc Zyngiera15f6932017-10-23 17:11:15 +01001599}
1600
Marc Zyngier17ab9d52017-10-23 17:11:22 +01001601static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
Marc Zyngiera15f6932017-10-23 17:11:15 +01001602{
Marc Zyngier17ab9d52017-10-23 17:11:22 +01001603 __invalidate_icache_guest_page(pfn, size);
Marc Zyngier0d3e4d42015-01-05 21:13:24 +00001604}
1605
James Morse196f8782017-06-20 17:11:48 +01001606static void kvm_send_hwpoison_signal(unsigned long address,
1607 struct vm_area_struct *vma)
1608{
Eric W. Biederman795a8372018-04-16 13:39:10 -05001609 short lsb;
James Morse196f8782017-06-20 17:11:48 +01001610
1611 if (is_vm_hugetlb_page(vma))
Eric W. Biederman795a8372018-04-16 13:39:10 -05001612 lsb = huge_page_shift(hstate_vma(vma));
James Morse196f8782017-06-20 17:11:48 +01001613 else
Eric W. Biederman795a8372018-04-16 13:39:10 -05001614 lsb = PAGE_SHIFT;
James Morse196f8782017-06-20 17:11:48 +01001615
Eric W. Biederman795a8372018-04-16 13:39:10 -05001616 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
James Morse196f8782017-06-20 17:11:48 +01001617}
1618
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001619static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
1620 unsigned long hva,
1621 unsigned long map_size)
Christoffer Dall6794ad52018-11-02 08:53:22 +01001622{
Shaokun Zhangc2be79a2019-02-19 17:22:21 +08001623 gpa_t gpa_start;
Christoffer Dall6794ad52018-11-02 08:53:22 +01001624 hva_t uaddr_start, uaddr_end;
1625 size_t size;
1626
1627 size = memslot->npages * PAGE_SIZE;
1628
1629 gpa_start = memslot->base_gfn << PAGE_SHIFT;
Christoffer Dall6794ad52018-11-02 08:53:22 +01001630
1631 uaddr_start = memslot->userspace_addr;
1632 uaddr_end = uaddr_start + size;
1633
1634 /*
1635 * Pages belonging to memslots that don't have the same alignment
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001636 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
1637 * PMD/PUD entries, because we'll end up mapping the wrong pages.
Christoffer Dall6794ad52018-11-02 08:53:22 +01001638 *
1639 * Consider a layout like the following:
1640 *
1641 * memslot->userspace_addr:
1642 * +-----+--------------------+--------------------+---+
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001643 * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz|
Christoffer Dall6794ad52018-11-02 08:53:22 +01001644 * +-----+--------------------+--------------------+---+
1645 *
1646 * memslot->base_gfn << PAGE_SIZE:
1647 * +---+--------------------+--------------------+-----+
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001648 * |abc|def Stage-2 block | Stage-2 block |tvxyz|
Christoffer Dall6794ad52018-11-02 08:53:22 +01001649 * +---+--------------------+--------------------+-----+
1650 *
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001651 * If we create those stage-2 blocks, we'll end up with this incorrect
Christoffer Dall6794ad52018-11-02 08:53:22 +01001652 * mapping:
1653 * d -> f
1654 * e -> g
1655 * f -> h
1656 */
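	/*
	 * Illustrative numbers (assuming a 2M map_size): uaddr_start ==
	 * 0x40200000 sits at offset 0x0 within a 2M block while gpa_start ==
	 * 0x80100000 sits at offset 0x100000, so the two are misaligned, the
	 * check below makes the function return false and the caller falls
	 * back to page-sized mappings.
	 */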
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001657 if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
Christoffer Dall6794ad52018-11-02 08:53:22 +01001658 return false;
1659
1660 /*
1661 * Next, let's make sure we're not trying to map anything not covered
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001662 * by the memslot. This means we have to prohibit block size mappings
1663 * for the beginning and end of a non-block aligned and non-block sized
Christoffer Dall6794ad52018-11-02 08:53:22 +01001664 * memory slot (illustrated by the head and tail parts of the
1665 * userspace view above containing pages 'abcde' and 'xyz',
1666 * respectively).
1667 *
1668 * Note that it doesn't matter if we do the check using the
1669 * userspace_addr or the base_gfn, as both are equally aligned (per
1670 * the check above) and equally sized.
1671 */
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001672 return (hva & ~(map_size - 1)) >= uaddr_start &&
1673 (hva & ~(map_size - 1)) + map_size <= uaddr_end;
Christoffer Dall6794ad52018-11-02 08:53:22 +01001674}
1675
Christoffer Dall94f8e642013-01-20 18:28:12 -05001676static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
Christoffer Dall98047882014-08-19 12:18:04 +02001677 struct kvm_memory_slot *memslot, unsigned long hva,
Christoffer Dall94f8e642013-01-20 18:28:12 -05001678 unsigned long fault_status)
1679{
Christoffer Dall94f8e642013-01-20 18:28:12 -05001680 int ret;
Punit Agrawal6396b852018-12-11 17:10:35 +00001681 bool write_fault, writable, force_pte = false;
1682 bool exec_fault, needs_exec;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001683 unsigned long mmu_seq;
Christoffer Dallad361f02012-11-01 17:14:45 +01001684 gfn_t gfn = fault_ipa >> PAGE_SHIFT;
Christoffer Dallad361f02012-11-01 17:14:45 +01001685 struct kvm *kvm = vcpu->kvm;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001686 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
Christoffer Dallad361f02012-11-01 17:14:45 +01001687 struct vm_area_struct *vma;
Dan Williamsba049e92016-01-15 16:56:11 -08001688 kvm_pfn_t pfn;
Kim Phillipsb8865762014-06-26 01:45:51 +01001689 pgprot_t mem_type = PAGE_S2;
Mario Smarduch15a49a42015-01-15 15:58:58 -08001690 bool logging_active = memslot_is_logging(memslot);
Punit Agrawal3f58bf62018-12-11 17:10:34 +00001691 unsigned long vma_pagesize, flags = 0;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001692
Ard Biesheuvela7d079c2014-09-09 11:27:09 +01001693 write_fault = kvm_is_write_fault(vcpu);
Marc Zyngierd0e22b42017-10-23 17:11:19 +01001694 exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
1695 VM_BUG_ON(write_fault && exec_fault);
1696
1697 if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
Christoffer Dall94f8e642013-01-20 18:28:12 -05001698 kvm_err("Unexpected L2 read permission error\n");
1699 return -EFAULT;
1700 }
1701
Christoffer Dallad361f02012-11-01 17:14:45 +01001702 /* Let's check if we will get back a huge page backed by hugetlbfs */
1703 down_read(&current->mm->mmap_sem);
1704 vma = find_vma_intersection(current->mm, hva, hva + 1);
Ard Biesheuvel37b54402014-09-17 14:56:17 -07001705 if (unlikely(!vma)) {
1706 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1707 up_read(&current->mm->mmap_sem);
1708 return -EFAULT;
1709 }
1710
Punit Agrawal3f58bf62018-12-11 17:10:34 +00001711 vma_pagesize = vma_kernel_pagesize(vma);
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001712 if (logging_active ||
1713 !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
1714 force_pte = true;
1715 vma_pagesize = PAGE_SIZE;
1716 }
1717
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001718 /*
Suzuki K Poulose280cebf2019-01-29 19:12:17 +00001719 * The stage2 tables have a minimum of 2 levels (for arm64 see
1720 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
1721 * use PMD_SIZE huge mappings (even when the PMD is folded into the PGD).
1722 * As for PUD huge mappings, we must make sure that we have at least
1723 * 3 levels, i.e., that the PMD is not folded.
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001724 */
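	/*
	 * For a hugetlbfs-backed VMA that we are about to map with a block,
	 * realign gfn to the start of the backing huge page so that the pfn
	 * resolved by gfn_to_pfn_prot() below is the head of that block.
	 */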
Suzuki K Poulosea80868f2019-03-12 09:52:51 +00001725 if (vma_pagesize == PMD_SIZE ||
1726 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001727 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
Christoffer Dallad361f02012-11-01 17:14:45 +01001728 up_read(&current->mm->mmap_sem);
1729
Christoffer Dall94f8e642013-01-20 18:28:12 -05001730 /* We need at least the second and third level page table pages */
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001731 ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
Christoffer Dall38f791a2014-10-10 12:14:28 +02001732 KVM_NR_MEM_OBJS);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001733 if (ret)
1734 return ret;
1735
1736 mmu_seq = vcpu->kvm->mmu_notifier_seq;
1737 /*
1738 * Ensure the read of mmu_notifier_seq happens before we call
1739 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
1740 * the page we just got a reference to getting unmapped before we have a
1741 * chance to grab the mmu_lock, which ensures that if the page gets
1742 * unmapped afterwards, the call to kvm_unmap_hva will take it away
1743 * from us again properly. This smp_rmb() interacts with the smp_wmb()
1744 * in kvm_mmu_notifier_invalidate_<page|range_end>.
1745 */
1746 smp_rmb();
1747
Christoffer Dallad361f02012-11-01 17:14:45 +01001748 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
James Morse196f8782017-06-20 17:11:48 +01001749 if (pfn == KVM_PFN_ERR_HWPOISON) {
1750 kvm_send_hwpoison_signal(hva, vma);
1751 return 0;
1752 }
Christoffer Dall9ac71592016-08-17 10:46:10 +02001753 if (is_error_noslot_pfn(pfn))
Christoffer Dall94f8e642013-01-20 18:28:12 -05001754 return -EFAULT;
1755
Mario Smarduch15a49a42015-01-15 15:58:58 -08001756 if (kvm_is_device_pfn(pfn)) {
Kim Phillipsb8865762014-06-26 01:45:51 +01001757 mem_type = PAGE_S2_DEVICE;
Mario Smarduch15a49a42015-01-15 15:58:58 -08001758 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
1759 } else if (logging_active) {
1760 /*
1761 * Faults on pages in a memslot with logging enabled
1762 * should not be mapped with huge pages (it introduces churn
1763 * and performance degradation), so force a pte mapping.
1764 */
Mario Smarduch15a49a42015-01-15 15:58:58 -08001765 flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
1766
1767 /*
1768 * Only actually map the page as writable if this was a write
1769 * fault.
1770 */
1771 if (!write_fault)
1772 writable = false;
1773 }
Kim Phillipsb8865762014-06-26 01:45:51 +01001774
Christoffer Dallad361f02012-11-01 17:14:45 +01001775 spin_lock(&kvm->mmu_lock);
1776 if (mmu_notifier_retry(kvm, mmu_seq))
Christoffer Dall94f8e642013-01-20 18:28:12 -05001777 goto out_unlock;
Mario Smarduch15a49a42015-01-15 15:58:58 -08001778
Punit Agrawal3f58bf62018-12-11 17:10:34 +00001779 if (vma_pagesize == PAGE_SIZE && !force_pte) {
1780 /*
1781 * Only PMD_SIZE transparent hugepages(THP) are
1782 * currently supported. This code will need to be
1783 * updated to support other THP sizes.
Suzuki K Poulose2e8010b2019-04-10 16:14:57 +01001784 *
1785 * Make sure the host VA and the guest IPA are sufficiently
1786 * aligned and that the block is contained within the memslot.
Punit Agrawal3f58bf62018-12-11 17:10:34 +00001787 */
Suzuki K Poulose2e8010b2019-04-10 16:14:57 +01001788 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
1789 transparent_hugepage_adjust(&pfn, &fault_ipa))
Punit Agrawal3f58bf62018-12-11 17:10:34 +00001790 vma_pagesize = PMD_SIZE;
1791 }
Christoffer Dallad361f02012-11-01 17:14:45 +01001792
Punit Agrawal3f58bf62018-12-11 17:10:34 +00001793 if (writable)
1794 kvm_set_pfn_dirty(pfn);
1795
1796 if (fault_status != FSC_PERM)
1797 clean_dcache_guest_page(pfn, vma_pagesize);
1798
1799 if (exec_fault)
1800 invalidate_icache_guest_page(pfn, vma_pagesize);
1801
Punit Agrawal6396b852018-12-11 17:10:35 +00001802 /*
1803 * If we took an execution fault we have made the
1804 * icache/dcache coherent above and should now let the s2
1805 * mapping be executable.
1806 *
1807 * Write faults (!exec_fault && FSC_PERM) are orthogonal to
1808 * execute permissions, and we preserve whatever we have.
1809 */
1810 needs_exec = exec_fault ||
1811 (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
1812
Punit Agrawalb8e0ba72018-12-11 17:10:41 +00001813 if (vma_pagesize == PUD_SIZE) {
1814 pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
1815
1816 new_pud = kvm_pud_mkhuge(new_pud);
1817 if (writable)
1818 new_pud = kvm_s2pud_mkwrite(new_pud);
1819
1820 if (needs_exec)
1821 new_pud = kvm_s2pud_mkexec(new_pud);
1822
1823 ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud);
1824 } else if (vma_pagesize == PMD_SIZE) {
Punit Agrawalf8df7332018-12-11 17:10:36 +00001825 pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
1826
1827 new_pmd = kvm_pmd_mkhuge(new_pmd);
1828
Punit Agrawal3f58bf62018-12-11 17:10:34 +00001829 if (writable)
Catalin Marinas06485052016-04-13 17:57:37 +01001830 new_pmd = kvm_s2pmd_mkwrite(new_pmd);
Marc Zyngierd0e22b42017-10-23 17:11:19 +01001831
Punit Agrawal6396b852018-12-11 17:10:35 +00001832 if (needs_exec)
Marc Zyngierd0e22b42017-10-23 17:11:19 +01001833 new_pmd = kvm_s2pmd_mkexec(new_pmd);
Marc Zyngiera15f6932017-10-23 17:11:15 +01001834
Christoffer Dallad361f02012-11-01 17:14:45 +01001835 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1836 } else {
Punit Agrawalf8df7332018-12-11 17:10:36 +00001837 pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
Mario Smarduch15a49a42015-01-15 15:58:58 -08001838
Christoffer Dallad361f02012-11-01 17:14:45 +01001839 if (writable) {
Catalin Marinas06485052016-04-13 17:57:37 +01001840 new_pte = kvm_s2pte_mkwrite(new_pte);
Mario Smarduch15a49a42015-01-15 15:58:58 -08001841 mark_page_dirty(kvm, gfn);
Christoffer Dallad361f02012-11-01 17:14:45 +01001842 }
Marc Zyngiera9c0e122017-10-23 17:11:20 +01001843
Punit Agrawal6396b852018-12-11 17:10:35 +00001844 if (needs_exec)
Marc Zyngierd0e22b42017-10-23 17:11:19 +01001845 new_pte = kvm_s2pte_mkexec(new_pte);
Marc Zyngiera15f6932017-10-23 17:11:15 +01001846
Mario Smarduch15a49a42015-01-15 15:58:58 -08001847 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001848 }
Christoffer Dallad361f02012-11-01 17:14:45 +01001849
Christoffer Dall94f8e642013-01-20 18:28:12 -05001850out_unlock:
Christoffer Dallad361f02012-11-01 17:14:45 +01001851 spin_unlock(&kvm->mmu_lock);
Marc Zyngier35307b92015-03-12 18:16:51 +00001852 kvm_set_pfn_accessed(pfn);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001853 kvm_release_pfn_clean(pfn);
Christoffer Dallad361f02012-11-01 17:14:45 +01001854 return ret;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001855}
1856
Marc Zyngieraeda9132015-03-12 18:16:52 +00001857/*
1858 * Resolve the access fault by making the page young again.
1859 * Note that because the faulting entry is guaranteed not to be
1860 * cached in the TLB, we don't need to invalidate anything.
Catalin Marinas06485052016-04-13 17:57:37 +01001861 * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
1862 * so there is no need for atomic (pte|pmd)_mkyoung operations.
Marc Zyngieraeda9132015-03-12 18:16:52 +00001863 */
1864static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
1865{
Punit Agrawaleb3f06242018-12-11 17:10:39 +00001866 pud_t *pud;
Marc Zyngieraeda9132015-03-12 18:16:52 +00001867 pmd_t *pmd;
1868 pte_t *pte;
Dan Williamsba049e92016-01-15 16:56:11 -08001869 kvm_pfn_t pfn;
Marc Zyngieraeda9132015-03-12 18:16:52 +00001870 bool pfn_valid = false;
1871
1872 trace_kvm_access_fault(fault_ipa);
1873
1874 spin_lock(&vcpu->kvm->mmu_lock);
1875
Punit Agrawaleb3f06242018-12-11 17:10:39 +00001876 if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte))
Marc Zyngieraeda9132015-03-12 18:16:52 +00001877 goto out;
1878
Punit Agrawaleb3f06242018-12-11 17:10:39 +00001879 if (pud) { /* HugeTLB */
1880 *pud = kvm_s2pud_mkyoung(*pud);
1881 pfn = kvm_pud_pfn(*pud);
1882 pfn_valid = true;
1883 } else if (pmd) { /* THP, HugeTLB */
Marc Zyngieraeda9132015-03-12 18:16:52 +00001884 *pmd = pmd_mkyoung(*pmd);
1885 pfn = pmd_pfn(*pmd);
1886 pfn_valid = true;
Punit Agrawaleb3f06242018-12-11 17:10:39 +00001887 } else {
1888 *pte = pte_mkyoung(*pte); /* Just a page... */
1889 pfn = pte_pfn(*pte);
1890 pfn_valid = true;
Marc Zyngieraeda9132015-03-12 18:16:52 +00001891 }
1892
Marc Zyngieraeda9132015-03-12 18:16:52 +00001893out:
1894 spin_unlock(&vcpu->kvm->mmu_lock);
1895 if (pfn_valid)
1896 kvm_set_pfn_accessed(pfn);
1897}
1898
Christoffer Dall94f8e642013-01-20 18:28:12 -05001899/**
1900 * kvm_handle_guest_abort - handles all 2nd stage aborts
1901 * @vcpu: the VCPU pointer
1902 * @run: the kvm_run structure
1903 *
1904 * Any abort that gets to the host is almost guaranteed to be caused by a
1905 * missing second stage translation table entry: either the guest simply
1906 * needs more memory and we must allocate an appropriate page, or the guest
1907 * tried to access I/O memory, which is emulated by user space. The
1908 * distinction is based on the IPA causing the fault and whether this
1909 * memory region has been registered as standard RAM by user space.
1910 */
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001911int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1912{
Christoffer Dall94f8e642013-01-20 18:28:12 -05001913 unsigned long fault_status;
1914 phys_addr_t fault_ipa;
1915 struct kvm_memory_slot *memslot;
Christoffer Dall98047882014-08-19 12:18:04 +02001916 unsigned long hva;
1917 bool is_iabt, write_fault, writable;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001918 gfn_t gfn;
1919 int ret, idx;
1920
Tyler Baicar621f48e2017-06-21 12:17:14 -06001921 fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1922
1923 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
James Morsebb428922017-07-18 13:37:41 +01001924 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
Tyler Baicar621f48e2017-06-21 12:17:14 -06001925
James Morsebb428922017-07-18 13:37:41 +01001926 /* Synchronous External Abort? */
1927 if (kvm_vcpu_dabt_isextabt(vcpu)) {
1928 /*
1929 * For RAS the host kernel may handle this abort.
1930 * There is no need to pass the error into the guest.
1931 */
James Morse0db5e022019-01-29 18:48:49 +00001932 if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
Tyler Baicar621f48e2017-06-21 12:17:14 -06001933 return 1;
Tyler Baicar621f48e2017-06-21 12:17:14 -06001934
James Morsebb428922017-07-18 13:37:41 +01001935 if (unlikely(!is_iabt)) {
1936 kvm_inject_vabt(vcpu);
1937 return 1;
1938 }
Marc Zyngier40557102016-09-06 14:02:15 +01001939 }
1940
Marc Zyngier7393b592012-09-17 19:27:09 +01001941 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
1942 kvm_vcpu_get_hfar(vcpu), fault_ipa);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001943
1944 /* Check that the stage-2 fault is a translation, permission or access fault */
Marc Zyngier35307b92015-03-12 18:16:51 +00001945 if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
1946 fault_status != FSC_ACCESS) {
Christoffer Dall0496daa52014-09-26 12:29:34 +02001947 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1948 kvm_vcpu_trap_get_class(vcpu),
1949 (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1950 (unsigned long)kvm_vcpu_get_hsr(vcpu));
Christoffer Dall94f8e642013-01-20 18:28:12 -05001951 return -EFAULT;
1952 }
1953
1954 idx = srcu_read_lock(&vcpu->kvm->srcu);
1955
1956 gfn = fault_ipa >> PAGE_SHIFT;
Christoffer Dall98047882014-08-19 12:18:04 +02001957 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1958 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
Ard Biesheuvela7d079c2014-09-09 11:27:09 +01001959 write_fault = kvm_is_write_fault(vcpu);
Christoffer Dall98047882014-08-19 12:18:04 +02001960 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
Christoffer Dall94f8e642013-01-20 18:28:12 -05001961 if (is_iabt) {
1962 /* Prefetch Abort on I/O address */
Marc Zyngier7393b592012-09-17 19:27:09 +01001963 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
Christoffer Dall94f8e642013-01-20 18:28:12 -05001964 ret = 1;
1965 goto out_unlock;
1966 }
1967
Marc Zyngiercfe39502012-12-12 14:42:09 +00001968 /*
Marc Zyngier57c841f2016-01-29 15:01:28 +00001969 * Check for a cache maintenance operation. Since we
1970 * ended up here, we know it is outside of any memory
1971 * slot. But we can't find out if that is for a device,
1972 * or if the guest is just being stupid. The only thing
1973 * we know for sure is that this range cannot be cached.
1974 *
1975 * So let's assume that the guest is just being
1976 * cautious, and skip the instruction.
1977 */
1978 if (kvm_vcpu_dabt_is_cm(vcpu)) {
1979 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1980 ret = 1;
1981 goto out_unlock;
1982 }
1983
1984 /*
Marc Zyngiercfe39502012-12-12 14:42:09 +00001985 * The IPA is reported as [MAX:12], so we need to
1986 * complement it with the bottom 12 bits from the
1987 * faulting VA. This is always 12 bits, irrespective
1988 * of the page size.
1989 */
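		/*
		 * E.g. (made-up values): a reported IPA of 0x8f001000 with
		 * HFAR/FAR bits [11:0] == 0xabc yields fault_ipa == 0x8f001abc.
		 */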
1990 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
Christoffer Dall45e96ea2013-01-20 18:43:58 -05001991 ret = io_mem_abort(vcpu, run, fault_ipa);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001992 goto out_unlock;
1993 }
1994
Christoffer Dallc3058d52014-10-10 12:14:29 +02001995 /* Userspace should not be able to register out-of-bounds IPAs */
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001996 VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
Christoffer Dallc3058d52014-10-10 12:14:29 +02001997
Marc Zyngieraeda9132015-03-12 18:16:52 +00001998 if (fault_status == FSC_ACCESS) {
1999 handle_access_fault(vcpu, fault_ipa);
2000 ret = 1;
2001 goto out_unlock;
2002 }
2003
Christoffer Dall98047882014-08-19 12:18:04 +02002004 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
Christoffer Dall94f8e642013-01-20 18:28:12 -05002005 if (ret == 0)
2006 ret = 1;
2007out_unlock:
2008 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2009 return ret;
Christoffer Dall342cd0a2013-01-20 18:28:06 -05002010}
2011
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00002012static int handle_hva_to_gpa(struct kvm *kvm,
2013 unsigned long start,
2014 unsigned long end,
2015 int (*handler)(struct kvm *kvm,
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002016 gpa_t gpa, u64 size,
2017 void *data),
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00002018 void *data)
Christoffer Dalld5d81842013-01-20 18:28:07 -05002019{
2020 struct kvm_memslots *slots;
2021 struct kvm_memory_slot *memslot;
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00002022 int ret = 0;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002023
2024 slots = kvm_memslots(kvm);
2025
2026 /* we only care about the pages that the guest sees */
2027 kvm_for_each_memslot(memslot, slots) {
2028 unsigned long hva_start, hva_end;
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002029 gfn_t gpa;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002030
2031 hva_start = max(start, memslot->userspace_addr);
2032 hva_end = min(end, memslot->userspace_addr +
2033 (memslot->npages << PAGE_SHIFT));
2034 if (hva_start >= hva_end)
2035 continue;
2036
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002037 gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
2038 ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
Christoffer Dalld5d81842013-01-20 18:28:07 -05002039 }
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00002040
2041 return ret;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002042}
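/*
 * handle_hva_to_gpa() is the common walker behind the MMU notifier
 * callbacks below: kvm_unmap_hva_range(), kvm_set_spte_hva(), kvm_age_hva()
 * and kvm_test_age_hva() each pass their own per-range handler.
 */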
2043
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002044static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
Christoffer Dalld5d81842013-01-20 18:28:07 -05002045{
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002046 unmap_stage2_range(kvm, gpa, size);
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00002047 return 0;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002048}
2049
Christoffer Dalld5d81842013-01-20 18:28:07 -05002050int kvm_unmap_hva_range(struct kvm *kvm,
2051 unsigned long start, unsigned long end)
2052{
2053 if (!kvm->arch.pgd)
2054 return 0;
2055
2056 trace_kvm_unmap_hva_range(start, end);
2057 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
2058 return 0;
2059}
2060
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002061static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
Christoffer Dalld5d81842013-01-20 18:28:07 -05002062{
2063 pte_t *pte = (pte_t *)data;
2064
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002065 WARN_ON(size != PAGE_SIZE);
Mario Smarduch15a49a42015-01-15 15:58:58 -08002066 /*
2067 * We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
2068 * flag clear because MMU notifiers will have unmapped a huge PMD before
2069 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
2070 * therefore stage2_set_pte() never needs to clear out a huge PMD
2071 * through this calling path.
2072 */
2073 stage2_set_pte(kvm, NULL, gpa, pte, 0);
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00002074 return 0;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002075}
2076
2077
Lan Tianyu748c0e32018-12-06 21:21:10 +08002078int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
Christoffer Dalld5d81842013-01-20 18:28:07 -05002079{
2080 unsigned long end = hva + PAGE_SIZE;
Marc Zyngier694556d2018-08-23 09:58:27 +01002081 kvm_pfn_t pfn = pte_pfn(pte);
Christoffer Dalld5d81842013-01-20 18:28:07 -05002082 pte_t stage2_pte;
2083
2084 if (!kvm->arch.pgd)
Lan Tianyu748c0e32018-12-06 21:21:10 +08002085 return 0;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002086
2087 trace_kvm_set_spte_hva(hva);
Marc Zyngier694556d2018-08-23 09:58:27 +01002088
2089 /*
2090 * We've moved a page around, probably through CoW, so let's treat it
2091 * just like a translation fault and clean the cache to the PoC.
2092 */
2093 clean_dcache_guest_page(pfn, PAGE_SIZE);
Punit Agrawalf8df7332018-12-11 17:10:36 +00002094 stage2_pte = kvm_pfn_pte(pfn, PAGE_S2);
Christoffer Dalld5d81842013-01-20 18:28:07 -05002095 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
Lan Tianyu748c0e32018-12-06 21:21:10 +08002096
2097 return 0;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002098}
2099
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002100static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
Marc Zyngier35307b92015-03-12 18:16:51 +00002101{
Punit Agrawal35a63962018-12-11 17:10:40 +00002102 pud_t *pud;
Marc Zyngier35307b92015-03-12 18:16:51 +00002103 pmd_t *pmd;
2104 pte_t *pte;
2105
Punit Agrawal35a63962018-12-11 17:10:40 +00002106 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
2107 if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
Marc Zyngier35307b92015-03-12 18:16:51 +00002108 return 0;
2109
Punit Agrawal35a63962018-12-11 17:10:40 +00002110 if (pud)
2111 return stage2_pudp_test_and_clear_young(pud);
2112 else if (pmd)
Catalin Marinas06485052016-04-13 17:57:37 +01002113 return stage2_pmdp_test_and_clear_young(pmd);
Punit Agrawal35a63962018-12-11 17:10:40 +00002114 else
2115 return stage2_ptep_test_and_clear_young(pte);
Marc Zyngier35307b92015-03-12 18:16:51 +00002116}
2117
Suzuki K Poulose056aad62017-03-20 18:26:42 +00002118static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
Marc Zyngier35307b92015-03-12 18:16:51 +00002119{
Punit Agrawal35a63962018-12-11 17:10:40 +00002120 pud_t *pud;
Marc Zyngier35307b92015-03-12 18:16:51 +00002121 pmd_t *pmd;
2122 pte_t *pte;
2123
Punit Agrawal35a63962018-12-11 17:10:40 +00002124 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
2125 if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
Marc Zyngier35307b92015-03-12 18:16:51 +00002126 return 0;
2127
Punit Agrawal35a63962018-12-11 17:10:40 +00002128 if (pud)
2129 return kvm_s2pud_young(*pud);
2130 else if (pmd)
Marc Zyngier35307b92015-03-12 18:16:51 +00002131 return pmd_young(*pmd);
Punit Agrawal35a63962018-12-11 17:10:40 +00002132 else
Marc Zyngier35307b92015-03-12 18:16:51 +00002133 return pte_young(*pte);
Marc Zyngier35307b92015-03-12 18:16:51 +00002134}
2135
2136int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
2137{
Suzuki K Poulose7e5a6722017-07-05 09:57:00 +01002138 if (!kvm->arch.pgd)
2139 return 0;
Marc Zyngier35307b92015-03-12 18:16:51 +00002140 trace_kvm_age_hva(start, end);
2141 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
2142}
2143
2144int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
2145{
Suzuki K Poulose7e5a6722017-07-05 09:57:00 +01002146 if (!kvm->arch.pgd)
2147 return 0;
Marc Zyngier35307b92015-03-12 18:16:51 +00002148 trace_kvm_test_age_hva(hva);
2149 return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
2150}
2151
Christoffer Dalld5d81842013-01-20 18:28:07 -05002152void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
2153{
2154 mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
2155}
2156
Christoffer Dall342cd0a2013-01-20 18:28:06 -05002157phys_addr_t kvm_mmu_get_httbr(void)
2158{
Ard Biesheuvele4c5a682015-03-19 16:42:28 +00002159 if (__kvm_cpu_uses_extended_idmap())
2160 return virt_to_phys(merged_hyp_pgd);
2161 else
2162 return virt_to_phys(hyp_pgd);
Christoffer Dall342cd0a2013-01-20 18:28:06 -05002163}
2164
Marc Zyngier5a677ce2013-04-12 19:12:06 +01002165phys_addr_t kvm_get_idmap_vector(void)
2166{
2167 return hyp_idmap_vector;
2168}
2169
Marc Zyngier0535a3e2016-06-30 18:40:43 +01002170static int kvm_map_idmap_text(pgd_t *pgd)
2171{
2172 int err;
2173
2174 /* Create the idmap in the boot page tables */
Kristina Martsenko98732d12018-01-15 15:23:49 +00002175 err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
Marc Zyngier0535a3e2016-06-30 18:40:43 +01002176 hyp_idmap_start, hyp_idmap_end,
2177 __phys_to_pfn(hyp_idmap_start),
2178 PAGE_HYP_EXEC);
2179 if (err)
2180 kvm_err("Failed to idmap %lx-%lx\n",
2181 hyp_idmap_start, hyp_idmap_end);
2182
2183 return err;
2184}
2185
Christoffer Dall342cd0a2013-01-20 18:28:06 -05002186int kvm_mmu_init(void)
2187{
Marc Zyngier2fb41052013-04-12 19:12:03 +01002188 int err;
2189
Santosh Shilimkar4fda3422013-11-19 14:59:12 -05002190 hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
Marc Zyngier46fef152018-03-12 14:25:10 +00002191 hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
Santosh Shilimkar4fda3422013-11-19 14:59:12 -05002192 hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
Marc Zyngier46fef152018-03-12 14:25:10 +00002193 hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
Santosh Shilimkar4fda3422013-11-19 14:59:12 -05002194 hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
Marc Zyngier5a677ce2013-04-12 19:12:06 +01002195
Ard Biesheuvel06f75a12015-03-19 16:42:26 +00002196 /*
2197 * We rely on the linker script to ensure at build time that the HYP
2198 * init code does not cross a page boundary.
2199 */
2200 BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
Marc Zyngier5a677ce2013-04-12 19:12:06 +01002201
Marc Zyngierb4ef0492017-12-03 20:04:51 +00002202 kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
2203 kvm_debug("HYP VA range: %lx:%lx\n",
2204 kern_hyp_va(PAGE_OFFSET),
2205 kern_hyp_va((unsigned long)high_memory - 1));
Marc Zyngiereac378a2016-06-30 18:40:50 +01002206
Marc Zyngier6c41a412016-06-30 18:40:51 +01002207 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
Marc Zyngiered57cac2017-12-03 18:22:49 +00002208 hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) &&
Marc Zyngierd2896d42016-08-22 09:01:17 +01002209 hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
Marc Zyngiereac378a2016-06-30 18:40:50 +01002210 /*
2211 * The idmap page is intersecting with the VA space;
2212 * it is not safe to continue further.
2213 */
2214 kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
2215 err = -EINVAL;
2216 goto out;
2217 }
2218
Christoffer Dall38f791a2014-10-10 12:14:28 +02002219 hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
Marc Zyngier0535a3e2016-06-30 18:40:43 +01002220 if (!hyp_pgd) {
Christoffer Dalld5d81842013-01-20 18:28:07 -05002221 kvm_err("Hyp mode PGD not allocated\n");
Marc Zyngier2fb41052013-04-12 19:12:03 +01002222 err = -ENOMEM;
2223 goto out;
2224 }
2225
Ard Biesheuvele4c5a682015-03-19 16:42:28 +00002226 if (__kvm_cpu_uses_extended_idmap()) {
Marc Zyngier0535a3e2016-06-30 18:40:43 +01002227 boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2228 hyp_pgd_order);
2229 if (!boot_hyp_pgd) {
2230 kvm_err("Hyp boot PGD not allocated\n");
2231 err = -ENOMEM;
2232 goto out;
2233 }
2234
2235 err = kvm_map_idmap_text(boot_hyp_pgd);
2236 if (err)
2237 goto out;
2238
Ard Biesheuvele4c5a682015-03-19 16:42:28 +00002239 merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
2240 if (!merged_hyp_pgd) {
2241 kvm_err("Failed to allocate extra HYP pgd\n");
2242 goto out;
2243 }
2244 __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
2245 hyp_idmap_start);
Marc Zyngier0535a3e2016-06-30 18:40:43 +01002246 } else {
2247 err = kvm_map_idmap_text(hyp_pgd);
2248 if (err)
2249 goto out;
Marc Zyngier5a677ce2013-04-12 19:12:06 +01002250 }
2251
Marc Zyngiere3f019b2017-12-04 17:04:38 +00002252 io_map_base = hyp_idmap_start;
Christoffer Dalld5d81842013-01-20 18:28:07 -05002253 return 0;
Marc Zyngier2fb41052013-04-12 19:12:03 +01002254out:
Marc Zyngier4f728272013-04-12 19:12:05 +01002255 free_hyp_pgds();
Marc Zyngier2fb41052013-04-12 19:12:03 +01002256 return err;
Christoffer Dall342cd0a2013-01-20 18:28:06 -05002257}
Eric Augerdf6ce242014-06-06 11:10:23 +02002258
2259void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02002260 const struct kvm_userspace_memory_region *mem,
Eric Augerdf6ce242014-06-06 11:10:23 +02002261 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02002262 const struct kvm_memory_slot *new,
Eric Augerdf6ce242014-06-06 11:10:23 +02002263 enum kvm_mr_change change)
2264{
Mario Smarduchc6473552015-01-15 15:58:56 -08002265 /*
2266 * At this point the memslot has been committed and there is an
2267 * allocated dirty_bitmap[]; dirty pages will be tracked while the
2268 * memory slot is write protected.
2269 */
2270 if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
2271 kvm_mmu_wp_memory_region(kvm, mem->slot);
Eric Augerdf6ce242014-06-06 11:10:23 +02002272}
2273
2274int kvm_arch_prepare_memory_region(struct kvm *kvm,
2275 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02002276 const struct kvm_userspace_memory_region *mem,
Eric Augerdf6ce242014-06-06 11:10:23 +02002277 enum kvm_mr_change change)
2278{
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002279 hva_t hva = mem->userspace_addr;
2280 hva_t reg_end = hva + mem->memory_size;
2281 bool writable = !(mem->flags & KVM_MEM_READONLY);
2282 int ret = 0;
2283
Mario Smarduch15a49a42015-01-15 15:58:58 -08002284 if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
2285 change != KVM_MR_FLAGS_ONLY)
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002286 return 0;
2287
2288 /*
Christoffer Dallc3058d52014-10-10 12:14:29 +02002289 * Prevent userspace from creating a memory region outside of the IPA
2290 * space addressable by the KVM guest IPA space.
2291 */
2292 if (memslot->base_gfn + memslot->npages >=
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01002293 (kvm_phys_size(kvm) >> PAGE_SHIFT))
Christoffer Dallc3058d52014-10-10 12:14:29 +02002294 return -EFAULT;
2295
Marc Zyngier72f31042017-03-16 18:20:50 +00002296 down_read(&current->mm->mmap_sem);
Christoffer Dallc3058d52014-10-10 12:14:29 +02002297 /*
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002298 * A memory region could potentially cover multiple VMAs, and any holes
2299 * between them, so iterate over all of them to find out if we can map
2300 * any of them right now.
2301 *
2302 * +--------------------------------------------+
2303 * +---------------+----------------+ +----------------+
2304 * | : VMA 1 | VMA 2 | | VMA 3 : |
2305 * +---------------+----------------+ +----------------+
2306 * | memory region |
2307 * +--------------------------------------------+
2308 */
2309 do {
2310 struct vm_area_struct *vma = find_vma(current->mm, hva);
2311 hva_t vm_start, vm_end;
2312
2313 if (!vma || vma->vm_start >= reg_end)
2314 break;
2315
2316 /*
2317 * Mapping a read-only VMA is only allowed if the
2318 * memory region is configured as read-only.
2319 */
2320 if (writable && !(vma->vm_flags & VM_WRITE)) {
2321 ret = -EPERM;
2322 break;
2323 }
2324
2325 /*
2326 * Take the intersection of this VMA with the memory region
2327 */
2328 vm_start = max(hva, vma->vm_start);
2329 vm_end = min(reg_end, vma->vm_end);
2330
2331 if (vma->vm_flags & VM_PFNMAP) {
2332 gpa_t gpa = mem->guest_phys_addr +
2333 (vm_start - mem->userspace_addr);
Marek Majtykaca09f022015-09-16 12:04:55 +02002334 phys_addr_t pa;
2335
2336 pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
2337 pa += vm_start - vma->vm_start;
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002338
Mario Smarduch15a49a42015-01-15 15:58:58 -08002339 /* IO region dirty page logging not allowed */
Marc Zyngier72f31042017-03-16 18:20:50 +00002340 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
2341 ret = -EINVAL;
2342 goto out;
2343 }
Mario Smarduch15a49a42015-01-15 15:58:58 -08002344
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002345 ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
2346 vm_end - vm_start,
2347 writable);
2348 if (ret)
2349 break;
2350 }
2351 hva = vm_end;
2352 } while (hva < reg_end);
2353
Mario Smarduch15a49a42015-01-15 15:58:58 -08002354 if (change == KVM_MR_FLAGS_ONLY)
Marc Zyngier72f31042017-03-16 18:20:50 +00002355 goto out;
Mario Smarduch15a49a42015-01-15 15:58:58 -08002356
Ard Biesheuvel849260c2014-11-17 14:58:53 +00002357 spin_lock(&kvm->mmu_lock);
2358 if (ret)
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002359 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
Ard Biesheuvel849260c2014-11-17 14:58:53 +00002360 else
2361 stage2_flush_memslot(kvm, memslot);
2362 spin_unlock(&kvm->mmu_lock);
Marc Zyngier72f31042017-03-16 18:20:50 +00002363out:
2364 up_read(&current->mm->mmap_sem);
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002365 return ret;
Eric Augerdf6ce242014-06-06 11:10:23 +02002366}
2367
2368void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
2369 struct kvm_memory_slot *dont)
2370{
2371}
2372
2373int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2374 unsigned long npages)
2375{
2376 return 0;
2377}
2378
Sean Christopherson15248252019-02-05 12:54:17 -08002379void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
Eric Augerdf6ce242014-06-06 11:10:23 +02002380{
2381}
2382
2383void kvm_arch_flush_shadow_all(struct kvm *kvm)
2384{
Suzuki K Poulose293f2932016-09-08 16:25:49 +01002385 kvm_free_stage2_pgd(kvm);
Eric Augerdf6ce242014-06-06 11:10:23 +02002386}
2387
2388void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
2389 struct kvm_memory_slot *slot)
2390{
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02002391 gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
2392 phys_addr_t size = slot->npages << PAGE_SHIFT;
2393
2394 spin_lock(&kvm->mmu_lock);
2395 unmap_stage2_range(kvm, gpa, size);
2396 spin_unlock(&kvm->mmu_lock);
Eric Augerdf6ce242014-06-06 11:10:23 +02002397}
Marc Zyngier3c1e7162014-12-19 16:05:31 +00002398
2399/*
2400 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
2401 *
2402 * Main problems:
2403 * - S/W ops are local to a CPU (not broadcast)
2404 * - We have line migration behind our back (speculation)
2405 * - System caches don't support S/W at all (damn!)
2406 *
2407 * In the face of the above, the best we can do is to try and convert
2408 * S/W ops to VA ops. Because the guest is not allowed to infer the
2409 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
2410 * which is a rather good thing for us.
2411 *
2412 * Also, it is only used when turning caches on/off ("The expected
2413 * usage of the cache maintenance instructions that operate by set/way
2414 * is associated with the cache maintenance instructions associated
2415 * with the powerdown and powerup of caches, if this is required by
2416 * the implementation.").
2417 *
2418 * We use the following policy:
2419 *
2420 * - If we trap a S/W operation, we enable VM trapping to detect
2421 * caches being turned on/off, and do a full clean.
2422 *
2423 * - We flush the caches on both caches being turned on and off.
2424 *
2425 * - Once the caches are enabled, we stop trapping VM ops.
2426 */
2427void kvm_set_way_flush(struct kvm_vcpu *vcpu)
2428{
Christoffer Dall3df59d82017-08-03 12:09:05 +02002429 unsigned long hcr = *vcpu_hcr(vcpu);
Marc Zyngier3c1e7162014-12-19 16:05:31 +00002430
2431 /*
2432 * If this is the first time we do a S/W operation
2433 * (i.e. HCR_TVM not set) flush the whole memory, and enable
2434 * VM trapping.
2435 *
2436 * Otherwise, rely on the VM trapping to wait for the MMU +
2437 * Caches to be turned off. At that point, we'll be able to
2438 * clean the caches again.
2439 */
2440 if (!(hcr & HCR_TVM)) {
2441 trace_kvm_set_way_flush(*vcpu_pc(vcpu),
2442 vcpu_has_cache_enabled(vcpu));
2443 stage2_flush_vm(vcpu->kvm);
Christoffer Dall3df59d82017-08-03 12:09:05 +02002444 *vcpu_hcr(vcpu) = hcr | HCR_TVM;
Marc Zyngier3c1e7162014-12-19 16:05:31 +00002445 }
2446}
2447
2448void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
2449{
2450 bool now_enabled = vcpu_has_cache_enabled(vcpu);
2451
2452 /*
2453 * If switching the MMU+caches on, need to invalidate the caches.
2454 * If switching it off, need to clean the caches.
2455 * Clean + invalidate always does the trick.
2456 */
2457 if (now_enabled != was_enabled)
2458 stage2_flush_vm(vcpu->kvm);
2459
2460 /* Caches are now on, stop trapping VM ops (until a S/W op) */
2461 if (now_enabled)
Christoffer Dall3df59d82017-08-03 12:09:05 +02002462 *vcpu_hcr(vcpu) &= ~HCR_TVM;
Marc Zyngier3c1e7162014-12-19 16:05:31 +00002463
2464 trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
2465}