// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/sched/signal.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/virt.h>

#include "trace.h"

static struct kvm_pgtable *hyp_pgtable;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

static unsigned long io_map_base;


/*
 * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
 * we may see kernel panics with CONFIG_DETECT_HUNG_TASK,
 * CONFIG_LOCKUP_DETECTOR or CONFIG_LOCKDEP. Additionally, holding the lock
 * for too long will starve other vCPUs. We also have to make sure that the
 * page tables are not freed while the lock is dropped.
 */
static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
                              phys_addr_t end,
                              int (*fn)(struct kvm_pgtable *, u64, u64),
                              bool resched)
{
        int ret;
        u64 next;

        do {
                struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
                if (!pgt)
                        return -EINVAL;

                next = stage2_pgd_addr_end(kvm, addr, end);
                ret = fn(pgt, addr, next - addr);
                if (ret)
                        break;

                if (resched && next != end)
                        cond_resched_lock(&kvm->mmu_lock);
        } while (addr = next, addr != end);

        return ret;
}

#define stage2_apply_range_resched(kvm, addr, end, fn)                  \
        stage2_apply_range(kvm, addr, end, fn, true)

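/*
 * Usage sketch (illustrative, mirroring the flush and write-protect helpers
 * further down in this file): a caller that wants to apply a page-table
 * callback across a whole memslot under the MMU lock, while still letting
 * the scheduler run, uses the resched variant:
 *
 *      phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
 *      phys_addr_t end = start + PAGE_SIZE * memslot->npages;
 *
 *      spin_lock(&kvm->mmu_lock);
 *      stage2_apply_range_resched(kvm, start, end, kvm_pgtable_stage2_flush);
 *      spin_unlock(&kvm->mmu_lock);
 */
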
static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
        return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}

/**
 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
 * @kvm: pointer to kvm structure.
 *
 * Interface to HYP function to flush all VM TLB entries
 */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        ++kvm->stat.generic.remote_tlb_flush_requests;
        kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
}

static bool kvm_is_device_pfn(unsigned long pfn)
{
        return !pfn_is_map_memory(pfn);
}

static void *stage2_memcache_zalloc_page(void *arg)
{
        struct kvm_mmu_memory_cache *mc = arg;

        /* Allocated with __GFP_ZERO, so no need to zero */
        return kvm_mmu_memory_cache_alloc(mc);
}

static void *kvm_host_zalloc_pages_exact(size_t size)
{
        return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
}

static void kvm_host_get_page(void *addr)
{
        get_page(virt_to_page(addr));
}

static void kvm_host_put_page(void *addr)
{
        put_page(virt_to_page(addr));
}

static int kvm_host_page_count(void *addr)
{
        return page_count(virt_to_page(addr));
}

static phys_addr_t kvm_host_pa(void *addr)
{
        return __pa(addr);
}

static void *kvm_host_va(phys_addr_t phys)
{
        return __va(phys);
}

static void clean_dcache_guest_page(void *va, size_t size)
{
        __clean_dcache_guest_page(va, size);
}

static void invalidate_icache_guest_page(void *va, size_t size)
{
        __invalidate_icache_guest_page(va, size);
}

/*
 * Unmapping vs dcache management:
 *
 * If a guest maps certain memory pages as uncached, all writes will
 * bypass the data cache and go directly to RAM.  However, the CPUs
 * can still speculate reads (not writes) and fill cache lines with
 * data.
 *
 * Those cache lines will be *clean* cache lines though, so a
 * clean+invalidate operation is equivalent to an invalidate
 * operation, because no cache lines are marked dirty.
 *
 * Those clean cache lines could be filled prior to an uncached write
 * by the guest, and the cache coherent IO subsystem would therefore
 * end up writing old data to disk.
 *
 * This is why right after unmapping a page/section and invalidating
 * the corresponding TLBs, we flush to make sure the IO subsystem will
 * never hit in the cache.
 *
 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
 * we then fully enforce cacheability of RAM, no matter what the guest
 * does.
 */
/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @mmu:   The KVM stage-2 MMU pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 * @may_block: Whether or not we are permitted to block
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
                                 bool may_block)
{
        struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
        phys_addr_t end = start + size;

        assert_spin_locked(&kvm->mmu_lock);
        WARN_ON(size & ~PAGE_MASK);
        WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap,
                                   may_block));
}

static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
{
        __unmap_stage2_range(mmu, start, size, true);
}
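
/*
 * Usage sketch (illustrative; see stage2_unmap_memslot() below for the real
 * thing): callers tear down the stage-2 mappings covering a memslot while
 * holding mmu_lock:
 *
 *      gpa_t gpa = memslot->base_gfn << PAGE_SHIFT;
 *
 *      spin_lock(&kvm->mmu_lock);
 *      unmap_stage2_range(&kvm->arch.mmu, gpa, PAGE_SIZE * memslot->npages);
 *      spin_unlock(&kvm->mmu_lock);
 */
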
192
Marc Zyngier9d218a12014-01-15 12:50:23 +0000193static void stage2_flush_memslot(struct kvm *kvm,
194 struct kvm_memory_slot *memslot)
195{
196 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
197 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
Marc Zyngier9d218a12014-01-15 12:50:23 +0000198
Quentin Perret8d5207b2020-09-11 14:25:23 +0100199 stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_flush);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000200}
201
202/**
203 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
204 * @kvm: The struct kvm pointer
205 *
206 * Go through the stage 2 page tables and invalidate any cache lines
207 * backing memory already mapped to the VM.
208 */
Marc Zyngier3c1e7162014-12-19 16:05:31 +0000209static void stage2_flush_vm(struct kvm *kvm)
Marc Zyngier9d218a12014-01-15 12:50:23 +0000210{
211 struct kvm_memslots *slots;
212 struct kvm_memory_slot *memslot;
Maciej S. Szmigieroa54d8062021-12-06 20:54:30 +0100213 int idx, bkt;
Marc Zyngier9d218a12014-01-15 12:50:23 +0000214
215 idx = srcu_read_lock(&kvm->srcu);
216 spin_lock(&kvm->mmu_lock);
217
218 slots = kvm_memslots(kvm);
Maciej S. Szmigieroa54d8062021-12-06 20:54:30 +0100219 kvm_for_each_memslot(memslot, bkt, slots)
Marc Zyngier9d218a12014-01-15 12:50:23 +0000220 stage2_flush_memslot(kvm, memslot);
221
222 spin_unlock(&kvm->mmu_lock);
223 srcu_read_unlock(&kvm->srcu, idx);
224}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 */
void free_hyp_pgds(void)
{
        mutex_lock(&kvm_hyp_pgd_mutex);
        if (hyp_pgtable) {
                kvm_pgtable_hyp_destroy(hyp_pgtable);
                kfree(hyp_pgtable);
                hyp_pgtable = NULL;
        }
        mutex_unlock(&kvm_hyp_pgd_mutex);
}

static bool kvm_host_owns_hyp_mappings(void)
{
        if (is_kernel_in_hyp_mode())
                return false;

        if (static_branch_likely(&kvm_protected_mode_initialized))
                return false;

        /*
         * This can happen at boot time when __create_hyp_mappings() is called
         * after the hyp protection has been enabled, but the static key has
         * not been flipped yet.
         */
        if (!hyp_pgtable && is_protected_kvm_enabled())
                return false;

        WARN_ON(!hyp_pgtable);

        return true;
}

static int __create_hyp_mappings(unsigned long start, unsigned long size,
                                 unsigned long phys, enum kvm_pgtable_prot prot)
{
        int err;

        if (WARN_ON(!kvm_host_owns_hyp_mappings()))
                return -EINVAL;

        mutex_lock(&kvm_hyp_pgd_mutex);
        err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
        mutex_unlock(&kvm_hyp_pgd_mutex);

        return err;
}

static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
        if (!is_vmalloc_addr(kaddr)) {
                BUG_ON(!virt_addr_valid(kaddr));
                return __pa(kaddr);
        } else {
                return page_to_phys(vmalloc_to_page(kaddr)) +
                       offset_in_page(kaddr);
        }
}

struct hyp_shared_pfn {
        u64 pfn;
        int count;
        struct rb_node node;
};

static DEFINE_MUTEX(hyp_shared_pfns_lock);
static struct rb_root hyp_shared_pfns = RB_ROOT;

static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
                                              struct rb_node **parent)
{
        struct hyp_shared_pfn *this;

        *node = &hyp_shared_pfns.rb_node;
        *parent = NULL;
        while (**node) {
                this = container_of(**node, struct hyp_shared_pfn, node);
                *parent = **node;
                if (this->pfn < pfn)
                        *node = &((**node)->rb_left);
                else if (this->pfn > pfn)
                        *node = &((**node)->rb_right);
                else
                        return this;
        }

        return NULL;
}

static int share_pfn_hyp(u64 pfn)
{
        struct rb_node **node, *parent;
        struct hyp_shared_pfn *this;
        int ret = 0;

        mutex_lock(&hyp_shared_pfns_lock);
        this = find_shared_pfn(pfn, &node, &parent);
        if (this) {
                this->count++;
                goto unlock;
        }

        this = kzalloc(sizeof(*this), GFP_KERNEL);
        if (!this) {
                ret = -ENOMEM;
                goto unlock;
        }

        this->pfn = pfn;
        this->count = 1;
        rb_link_node(&this->node, parent, node);
        rb_insert_color(&this->node, &hyp_shared_pfns);
        ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1);
unlock:
        mutex_unlock(&hyp_shared_pfns_lock);

        return ret;
}

static int unshare_pfn_hyp(u64 pfn)
{
        struct rb_node **node, *parent;
        struct hyp_shared_pfn *this;
        int ret = 0;

        mutex_lock(&hyp_shared_pfns_lock);
        this = find_shared_pfn(pfn, &node, &parent);
        if (WARN_ON(!this)) {
                ret = -ENOENT;
                goto unlock;
        }

        this->count--;
        if (this->count)
                goto unlock;

        rb_erase(&this->node, &hyp_shared_pfns);
        kfree(this);
        ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1);
unlock:
        mutex_unlock(&hyp_shared_pfns_lock);

        return ret;
}
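
/*
 * Refcounting walk-through (a sketch of the behaviour above, not code that
 * runs here): sharing the same pfn twice issues a single
 * __pkvm_host_share_hyp hypercall, and only the final unshare removes the
 * rb-tree node and issues __pkvm_host_unshare_hyp:
 *
 *      share_pfn_hyp(pfn);      // node inserted, count = 1, share hypercall
 *      share_pfn_hyp(pfn);      // count = 2, no hypercall
 *      unshare_pfn_hyp(pfn);    // count = 1, no hypercall
 *      unshare_pfn_hyp(pfn);    // node erased, unshare hypercall
 */
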
372
Quentin Perret3f868e12021-12-15 16:12:23 +0000373int kvm_share_hyp(void *from, void *to)
374{
Quentin Perreta83e2192021-12-15 16:12:24 +0000375 phys_addr_t start, end, cur;
376 u64 pfn;
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500377 int ret;
378
Quentin Perret3f868e12021-12-15 16:12:23 +0000379 if (is_kernel_in_hyp_mode())
380 return 0;
381
382 /*
383 * The share hcall maps things in the 'fixed-offset' region of the hyp
384 * VA space, so we can only share physically contiguous data-structures
385 * for now.
386 */
387 if (is_vmalloc_or_module_addr(from) || is_vmalloc_or_module_addr(to))
388 return -EINVAL;
389
390 if (kvm_host_owns_hyp_mappings())
391 return create_hyp_mappings(from, to, PAGE_HYP);
392
Quentin Perreta83e2192021-12-15 16:12:24 +0000393 start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
394 end = PAGE_ALIGN(__pa(to));
395 for (cur = start; cur < end; cur += PAGE_SIZE) {
396 pfn = __phys_to_pfn(cur);
397 ret = share_pfn_hyp(pfn);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500398 if (ret)
399 return ret;
400 }
401
402 return 0;
403}
404
Quentin Perret52b28652021-12-15 16:12:31 +0000405void kvm_unshare_hyp(void *from, void *to)
406{
407 phys_addr_t start, end, cur;
408 u64 pfn;
409
410 if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from)
411 return;
412
413 start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
414 end = PAGE_ALIGN(__pa(to));
415 for (cur = start; cur < end; cur += PAGE_SIZE) {
416 pfn = __phys_to_pfn(cur);
417 WARN_ON(unshare_pfn_hyp(pfn));
418 }
419}
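
/*
 * Usage sketch (hedged; the vcpu object here is only an example of a
 * physically contiguous structure that EL2 needs to access): callers pass
 * the [from, to) kernel VA range and balance the share with
 * kvm_unshare_hyp() on teardown:
 *
 *      ret = kvm_share_hyp(vcpu, vcpu + 1);
 *      if (ret)
 *              return ret;
 *      ...
 *      kvm_unshare_hyp(vcpu, vcpu + 1);
 */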

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:  The virtual kernel start address of the range
 * @to:    The virtual kernel end address of the range (exclusive)
 * @prot:  The protection to be applied to this range
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
        phys_addr_t phys_addr;
        unsigned long virt_addr;
        unsigned long start = kern_hyp_va((unsigned long)from);
        unsigned long end = kern_hyp_va((unsigned long)to);

        if (is_kernel_in_hyp_mode())
                return 0;

        if (!kvm_host_owns_hyp_mappings())
                return -EPERM;

        start = start & PAGE_MASK;
        end = PAGE_ALIGN(end);

        for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
                int err;

                phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
                err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr,
                                            prot);
                if (err)
                        return err;
        }

        return 0;
}
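
/*
 * Usage sketch (hedged; the section symbols are an assumption for the
 * example): boot-time code mirrors a kernel VA range into the hyp page
 * tables with the protection it needs, e.g. a read-only section:
 *
 *      err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
 *                                kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
 *      if (err)
 *              return err;
 */
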
459
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000460static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
Will Deacon0f9d09b2020-09-11 14:25:12 +0100461 unsigned long *haddr,
462 enum kvm_pgtable_prot prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500463{
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000464 unsigned long base;
465 int ret = 0;
Marc Zyngier6060df82013-04-12 19:12:01 +0100466
Quentin Perretbfa79a82021-03-19 10:01:26 +0000467 if (!kvm_host_owns_hyp_mappings()) {
468 base = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
469 phys_addr, size, prot);
470 if (IS_ERR_OR_NULL((void *)base))
471 return PTR_ERR((void *)base);
472 *haddr = base;
473
474 return 0;
475 }
476
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000477 mutex_lock(&kvm_hyp_pgd_mutex);
Marc Zyngier6060df82013-04-12 19:12:01 +0100478
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000479 /*
Fuad Tabba656012c2020-04-01 15:03:10 +0100480 * This assumes that we have enough space below the idmap
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000481 * page to allocate our VAs. If not, the check below will
482 * kick. A potential alternative would be to detect that
483 * overflow and switch to an allocation above the idmap.
484 *
485 * The allocated size is always a multiple of PAGE_SIZE.
486 */
487 size = PAGE_ALIGN(size + offset_in_page(phys_addr));
488 base = io_map_base - size;
Marc Zyngier1bb32a42017-12-04 16:43:23 +0000489
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000490 /*
491 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
492 * allocating the new area, as it would indicate we've
493 * overflowed the idmap/IO address range.
494 */
495 if ((base ^ io_map_base) & BIT(VA_BITS - 1))
496 ret = -ENOMEM;
497 else
498 io_map_base = base;
499
500 mutex_unlock(&kvm_hyp_pgd_mutex);
501
502 if (ret)
503 goto out;
504
Will Deacon0f9d09b2020-09-11 14:25:12 +0100505 ret = __create_hyp_mappings(base, size, phys_addr, prot);
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000506 if (ret)
507 goto out;
508
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000509 *haddr = base + offset_in_page(phys_addr);
Marc Zyngiere3f019b2017-12-04 17:04:38 +0000510out:
Marc Zyngierdc2e4632018-02-13 11:00:29 +0000511 return ret;
512}

/**
 * create_hyp_io_mappings - Map IO into both kernel and HYP
 * @phys_addr:  The physical start address which gets mapped
 * @size:       Size of the region being mapped
 * @kaddr:      Kernel VA for this mapping
 * @haddr:      HYP VA for this mapping
 */
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
                           void __iomem **kaddr,
                           void __iomem **haddr)
{
        unsigned long addr;
        int ret;

        if (is_protected_kvm_enabled())
                return -EPERM;

        *kaddr = ioremap(phys_addr, size);
        if (!*kaddr)
                return -ENOMEM;

        if (is_kernel_in_hyp_mode()) {
                *haddr = *kaddr;
                return 0;
        }

        ret = __create_hyp_private_mapping(phys_addr, size,
                                           &addr, PAGE_HYP_DEVICE);
        if (ret) {
                iounmap(*kaddr);
                *kaddr = NULL;
                *haddr = NULL;
                return ret;
        }

        *haddr = (void __iomem *)addr;
        return 0;
}
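
/*
 * Usage sketch (hedged; dev_base/dev_size are placeholders): a caller that
 * needs the same MMIO region visible to both the kernel and EL2, such as the
 * vGIC mapping a GIC CPU interface, would do:
 *
 *      void __iomem *kaddr, *haddr;
 *
 *      ret = create_hyp_io_mappings(dev_base, dev_size, &kaddr, &haddr);
 *      if (ret)
 *              return ret;
 */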

/**
 * create_hyp_exec_mappings - Map an executable range into HYP
 * @phys_addr:  The physical start address which gets mapped
 * @size:       Size of the region being mapped
 * @haddr:      HYP VA for this mapping
 */
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
                             void **haddr)
{
        unsigned long addr;
        int ret;

        BUG_ON(is_kernel_in_hyp_mode());

        ret = __create_hyp_private_mapping(phys_addr, size,
                                           &addr, PAGE_HYP_EXEC);
        if (ret) {
                *haddr = NULL;
                return ret;
        }

        *haddr = (void *)addr;
        return 0;
}

static struct kvm_pgtable_mm_ops kvm_user_mm_ops = {
        /* We shouldn't need any other callback to walk the PT */
        .phys_to_virt           = kvm_host_va,
};

static int get_user_mapping_size(struct kvm *kvm, u64 addr)
{
        struct kvm_pgtable pgt = {
                .pgd            = (kvm_pte_t *)kvm->mm->pgd,
                .ia_bits        = VA_BITS,
                .start_level    = (KVM_PGTABLE_MAX_LEVELS -
                                   CONFIG_PGTABLE_LEVELS),
                .mm_ops         = &kvm_user_mm_ops,
        };
        kvm_pte_t pte = 0;      /* Keep GCC quiet... */
        u32 level = ~0;
        int ret;

        ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
        VM_BUG_ON(ret);
        VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
        VM_BUG_ON(!(pte & PTE_VALID));

        return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
}

static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
        .zalloc_page            = stage2_memcache_zalloc_page,
        .zalloc_pages_exact     = kvm_host_zalloc_pages_exact,
        .free_pages_exact       = free_pages_exact,
        .get_page               = kvm_host_get_page,
        .put_page               = kvm_host_put_page,
        .page_count             = kvm_host_page_count,
        .phys_to_virt           = kvm_host_va,
        .virt_to_phys           = kvm_host_pa,
        .dcache_clean_inval_poc = clean_dcache_guest_page,
        .icache_inval_pou       = invalidate_icache_guest_page,
};

/**
 * kvm_init_stage2_mmu - Initialise a S2 MMU structure
 * @kvm:  The pointer to the KVM structure
 * @mmu:  The pointer to the s2 MMU structure
 *
 * Allocates only the stage-2 HW PGD level table(s).
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
{
        int cpu, err;
        struct kvm_pgtable *pgt;

        if (mmu->pgt != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }

        pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);
        if (!pgt)
                return -ENOMEM;

        mmu->arch = &kvm->arch;
        err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
        if (err)
                goto out_free_pgtable;

        mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
        if (!mmu->last_vcpu_ran) {
                err = -ENOMEM;
                goto out_destroy_pgtable;
        }

        for_each_possible_cpu(cpu)
                *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;

        mmu->pgt = pgt;
        mmu->pgd_phys = __pa(pgt->pgd);
        WRITE_ONCE(mmu->vmid.vmid_gen, 0);
        return 0;

out_destroy_pgtable:
        kvm_pgtable_stage2_destroy(pgt);
out_free_pgtable:
        kfree(pgt);
        return err;
}

static void stage2_unmap_memslot(struct kvm *kvm,
                                 struct kvm_memory_slot *memslot)
{
        hva_t hva = memslot->userspace_addr;
        phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
        phys_addr_t size = PAGE_SIZE * memslot->npages;
        hva_t reg_end = hva + size;

        /*
         * A memory region could potentially cover multiple VMAs, and any holes
         * between them, so iterate over all of them to find out if we should
         * unmap any of them.
         *
         *     +--------------------------------------------+
         * +---------------+----------------+   +----------------+
         * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
         * +---------------+----------------+   +----------------+
         *     |               memory region                |
         *     +--------------------------------------------+
         */
        do {
                struct vm_area_struct *vma;
                hva_t vm_start, vm_end;

                vma = find_vma_intersection(current->mm, hva, reg_end);
                if (!vma)
                        break;

                /*
                 * Take the intersection of this VMA with the memory region
                 */
                vm_start = max(hva, vma->vm_start);
                vm_end = min(reg_end, vma->vm_end);

                if (!(vma->vm_flags & VM_PFNMAP)) {
                        gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
                        unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
                }
                hva = vm_end;
        } while (hva < reg_end);
}

/**
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memregions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
void stage2_unmap_vm(struct kvm *kvm)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int idx, bkt;

        idx = srcu_read_lock(&kvm->srcu);
        mmap_read_lock(current->mm);
        spin_lock(&kvm->mmu_lock);

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, bkt, slots)
                stage2_unmap_memslot(kvm, memslot);

        spin_unlock(&kvm->mmu_lock);
        mmap_read_unlock(current->mm);
        srcu_read_unlock(&kvm->srcu, idx);
}

void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
{
        struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
        struct kvm_pgtable *pgt = NULL;

        spin_lock(&kvm->mmu_lock);
        pgt = mmu->pgt;
        if (pgt) {
                mmu->pgd_phys = 0;
                mmu->pgt = NULL;
                free_percpu(mmu->last_vcpu_ran);
        }
        spin_unlock(&kvm->mmu_lock);

        if (pgt) {
                kvm_pgtable_stage2_destroy(pgt);
                kfree(pgt);
        }
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:        The KVM pointer
 * @guest_ipa:  The IPA at which to insert the mapping
 * @pa:         The physical address of the device
 * @size:       The size of the mapping
 * @writable:   Whether or not to create a writable mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size, bool writable)
{
        phys_addr_t addr;
        int ret = 0;
        struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
        struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
                                     KVM_PGTABLE_PROT_R |
                                     (writable ? KVM_PGTABLE_PROT_W : 0);

        if (is_protected_kvm_enabled())
                return -EPERM;

        size += offset_in_page(guest_ipa);
        guest_ipa &= PAGE_MASK;

        for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
                ret = kvm_mmu_topup_memory_cache(&cache,
                                                 kvm_mmu_cache_min_pages(kvm));
                if (ret)
                        break;

                spin_lock(&kvm->mmu_lock);
                ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
                                             &cache);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        break;

                pa += PAGE_SIZE;
        }

        kvm_mmu_free_memory_cache(&cache);
        return ret;
}
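
/*
 * Usage sketch (hedged; the IPA/PA/size values are placeholders): callers
 * such as the vGIC use this to expose a host device region to the guest as
 * device memory at a fixed IPA:
 *
 *      ret = kvm_phys_addr_ioremap(kvm, guest_ipa_of_device,
 *                                  host_device_pa, device_size, true);
 *      if (ret)
 *              return ret;
 */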

/**
 * stage2_wp_range() - write protect stage2 memory region range
 * @mmu:   The KVM stage-2 MMU pointer
 * @addr:  Start address of range
 * @end:   End address of range
 */
static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
{
        struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
        stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
}

/**
 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
 * @kvm:   The KVM pointer
 * @slot:  The memory slot to write protect
 *
 * Called to start logging dirty pages after memory region
 * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
 * all present PUD, PMD and PTEs are write protected in the memory region.
 * Afterwards read of dirty page log can be called.
 *
 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
 * serializing operations for VM memory regions.
 */
static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
        struct kvm_memslots *slots = kvm_memslots(kvm);
        struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
        phys_addr_t start, end;

        if (WARN_ON_ONCE(!memslot))
                return;

        start = memslot->base_gfn << PAGE_SHIFT;
        end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

        spin_lock(&kvm->mmu_lock);
        stage2_wp_range(&kvm->arch.mmu, start, end);
        spin_unlock(&kvm->mmu_lock);
        kvm_flush_remote_tlbs(kvm);
}

/**
 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
 * @kvm:         The KVM pointer
 * @slot:        The memory slot associated with mask
 * @gfn_offset:  The gfn offset in memory slot
 * @mask:        The mask of dirty pages at offset 'gfn_offset' in this memory
 *               slot to be write protected
 *
 * Walks the bits set in @mask and write-protects the associated PTEs. The
 * caller must hold kvm_mmu_lock.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
                struct kvm_memory_slot *slot,
                gfn_t gfn_offset, unsigned long mask)
{
        phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
        phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
        phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

        stage2_wp_range(&kvm->arch.mmu, start, end);
}
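
/*
 * Worked example (illustrative numbers, 4KiB pages): with base_gfn == 0x100
 * and mask == 0x3c (bits 2..5 set), __ffs(mask) == 2 and __fls(mask) == 5,
 * so the range passed to stage2_wp_range() is [0x102000, 0x106000). It spans
 * every dirty gfn in the mask, and possibly clean gfns in between, which is
 * harmless since write-protecting an already-clean page has no effect on
 * correctness.
 */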

/*
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * dirty pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                struct kvm_memory_slot *slot,
                gfn_t gfn_offset, unsigned long mask)
{
        kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
{
        send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
}

static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
                                               unsigned long hva,
                                               unsigned long map_size)
{
        gpa_t gpa_start;
        hva_t uaddr_start, uaddr_end;
        size_t size;

        /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
        if (map_size == PAGE_SIZE)
                return true;

        size = memslot->npages * PAGE_SIZE;

        gpa_start = memslot->base_gfn << PAGE_SHIFT;

        uaddr_start = memslot->userspace_addr;
        uaddr_end = uaddr_start + size;

        /*
         * Pages belonging to memslots that don't have the same alignment
         * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
         * PMD/PUD entries, because we'll end up mapping the wrong pages.
         *
         * Consider a layout like the following:
         *
         * memslot->userspace_addr:
         *   +-----+--------------------+--------------------+---+
         *   |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
         *   +-----+--------------------+--------------------+---+
         *
         * memslot->base_gfn << PAGE_SHIFT:
         *   +---+--------------------+--------------------+-----+
         *   |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
         *   +---+--------------------+--------------------+-----+
         *
         * If we create those stage-2 blocks, we'll end up with this incorrect
         * mapping:
         *   d -> f
         *   e -> g
         *   f -> h
         */
        if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
                return false;

        /*
         * Next, let's make sure we're not trying to map anything not covered
         * by the memslot. This means we have to prohibit block size mappings
         * for the beginning and end of a non-block aligned and non-block sized
         * memory slot (illustrated by the head and tail parts of the
         * userspace view above containing pages 'abcde' and 'xyz',
         * respectively).
         *
         * Note that it doesn't matter if we do the check using the
         * userspace_addr or the base_gfn, as both are equally aligned (per
         * the check above) and equally sized.
         */
        return (hva & ~(map_size - 1)) >= uaddr_start &&
               (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}
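
/*
 * Worked example (illustrative numbers): with map_size == PMD_SIZE (2MiB), a
 * memslot with uaddr_start == 0x40200000 and gpa_start == 0x80200000 has the
 * same offset (0) within a 2MiB block on both sides, so block mapping is
 * allowed for any fault whose 2MiB-aligned hva window fits inside
 * [uaddr_start, uaddr_end). If uaddr_start were 0x40210000 instead, the
 * offsets would differ (0x10000 vs 0) and the function would return false.
 */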

/*
 * Check if the given hva is backed by a transparent huge page (THP) and
 * whether it can be mapped using block mapping in stage2. If so, adjust
 * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
 * supported. This will need to be updated to support other THP sizes.
 *
 * Returns the size of the mapping.
 */
static unsigned long
transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
                            unsigned long hva, kvm_pfn_t *pfnp,
                            phys_addr_t *ipap)
{
        kvm_pfn_t pfn = *pfnp;

        /*
         * Make sure the adjustment is done only for THP pages. Also make
         * sure that the HVA and IPA are sufficiently aligned and that the
         * block map is contained within the memslot.
         */
        if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
            get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
                /*
                 * The address we faulted on is backed by a transparent huge
                 * page.  However, because we map the compound huge page and
                 * not the individual tail page, we need to transfer the
                 * refcount to the head page.  We have to be careful that the
                 * THP doesn't start to split while we are adjusting the
                 * refcounts.
                 *
                 * We are sure this doesn't happen, because mmu_notifier_retry
                 * was successful and we are holding the mmu_lock, so if this
                 * THP is trying to split, it will be blocked in the mmu
                 * notifier before touching any of the pages, specifically
                 * before being able to call __split_huge_page_refcount().
                 *
                 * We can therefore safely transfer the refcount from PG_tail
                 * to PG_head and switch the pfn from a tail page to the head
                 * page accordingly.
                 */
                *ipap &= PMD_MASK;
                kvm_release_pfn_clean(pfn);
                pfn &= ~(PTRS_PER_PMD - 1);
                get_page(pfn_to_page(pfn));
                *pfnp = pfn;

                return PMD_SIZE;
        }

        /* Use page mapping if we cannot use block mapping. */
        return PAGE_SIZE;
}

997
Keqian Zhu2aa53d62021-05-07 19:03:22 +0800998static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
999{
1000 unsigned long pa;
1001
1002 if (is_vm_hugetlb_page(vma) && !(vma->vm_flags & VM_PFNMAP))
1003 return huge_page_shift(hstate_vma(vma));
1004
1005 if (!(vma->vm_flags & VM_PFNMAP))
1006 return PAGE_SHIFT;
1007
1008 VM_BUG_ON(is_vm_hugetlb_page(vma));
1009
1010 pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start);
1011
1012#ifndef __PAGETABLE_PMD_FOLDED
1013 if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
1014 ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
1015 ALIGN(hva, PUD_SIZE) <= vma->vm_end)
1016 return PUD_SHIFT;
1017#endif
1018
1019 if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
1020 ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
1021 ALIGN(hva, PMD_SIZE) <= vma->vm_end)
1022 return PMD_SHIFT;
1023
1024 return PAGE_SHIFT;
1025}
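
/*
 * Worked example (illustrative numbers, 4KiB pages): for a VM_PFNMAP VMA
 * with vm_start == 0x40000000, vm_end == 0x40400000 and vm_pgoff == 0x80000
 * (i.e. the region starts at PA 0x80000000), an hva of 0x40201000 yields
 * pa == 0x80201000. hva and pa share the same offset within a 2MiB block and
 * the surrounding 2MiB window [0x40200000, 0x40400000) lies inside the VMA,
 * so PMD_SHIFT is returned; the PUD case fails because a 1GiB window cannot
 * fit in this 4MiB VMA.
 */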

/*
 * The page will be mapped in stage 2 as Normal Cacheable, so the VM will be
 * able to see the page's tags and therefore they must be initialised first. If
 * PG_mte_tagged is set, tags have already been initialised.
 *
 * The race in the test/set of the PG_mte_tagged flag is handled by:
 * - preventing VM_SHARED mappings in a memslot with MTE preventing two VMs
 *   racing to sanitise the same page
 * - mmap_lock protects between a VM faulting a page in and the VMM performing
 *   an mprotect() to add VM_MTE
 */
static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
                             unsigned long size)
{
        unsigned long i, nr_pages = size >> PAGE_SHIFT;
        struct page *page;

        if (!kvm_has_mte(kvm))
                return 0;

        /*
         * pfn_to_online_page() is used to reject ZONE_DEVICE pages
         * that may not support tags.
         */
        page = pfn_to_online_page(pfn);

        if (!page)
                return -EFAULT;

        for (i = 0; i < nr_pages; i++, page++) {
                if (!test_bit(PG_mte_tagged, &page->flags)) {
                        mte_clear_page_tags(page_address(page));
                        set_bit(PG_mte_tagged, &page->flags);
                }
        }

        return 0;
}

static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          struct kvm_memory_slot *memslot, unsigned long hva,
                          unsigned long fault_status)
{
        int ret = 0;
        bool write_fault, writable, force_pte = false;
        bool exec_fault;
        bool device = false;
        bool shared;
        unsigned long mmu_seq;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        struct vm_area_struct *vma;
        short vma_shift;
        gfn_t gfn;
        kvm_pfn_t pfn;
        bool logging_active = memslot_is_logging(memslot);
        unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
        unsigned long vma_pagesize, fault_granule;
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
        struct kvm_pgtable *pgt;

        fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
        write_fault = kvm_is_write_fault(vcpu);
        exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
        VM_BUG_ON(write_fault && exec_fault);

        if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }

        /*
         * Let's check if we will get back a huge page backed by hugetlbfs, or
         * get block mapping for device MMIO region.
         */
        mmap_read_lock(current->mm);
        vma = vma_lookup(current->mm, hva);
        if (unlikely(!vma)) {
                kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
                mmap_read_unlock(current->mm);
                return -EFAULT;
        }

        /*
         * logging_active is guaranteed to never be true for VM_PFNMAP
         * memslots.
         */
        if (logging_active) {
                force_pte = true;
                vma_shift = PAGE_SHIFT;
        } else {
                vma_shift = get_vma_page_shift(vma, hva);
        }

        shared = (vma->vm_flags & VM_SHARED);

        switch (vma_shift) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SHIFT:
                if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
                        break;
                fallthrough;
#endif
        case CONT_PMD_SHIFT:
                vma_shift = PMD_SHIFT;
                fallthrough;
        case PMD_SHIFT:
                if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
                        break;
                fallthrough;
        case CONT_PTE_SHIFT:
                vma_shift = PAGE_SHIFT;
                force_pte = true;
                fallthrough;
        case PAGE_SHIFT:
                break;
        default:
                WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
        }

        vma_pagesize = 1UL << vma_shift;
        if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
                fault_ipa &= ~(vma_pagesize - 1);

        gfn = fault_ipa >> PAGE_SHIFT;
        mmap_read_unlock(current->mm);

        /*
         * Permission faults just need to update the existing leaf entry,
         * and so normally don't require allocations from the memcache. The
         * only exception to this is when dirty logging is enabled at runtime
         * and a write fault needs to collapse a block entry into a table.
         */
        if (fault_status != FSC_PERM || (logging_active && write_fault)) {
                ret = kvm_mmu_topup_memory_cache(memcache,
                                                 kvm_mmu_cache_min_pages(kvm));
                if (ret)
                        return ret;
        }

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        /*
         * Ensure the read of mmu_notifier_seq happens before we call
         * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
         * the page we just got a reference to being unmapped before we have a
         * chance to grab the mmu_lock, which ensures that if the page gets
         * unmapped afterwards, the call to kvm_unmap_gfn will take it away
         * from us again properly. This smp_rmb() interacts with the smp_wmb()
         * in kvm_mmu_notifier_invalidate_<page|range_end>.
         *
         * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is
         * used to avoid unnecessary overhead introduced to locate the memory
         * slot because it's always fixed even when @gfn is adjusted for huge
         * pages.
         */
        smp_rmb();

        pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
                                   write_fault, &writable, NULL);
        if (pfn == KVM_PFN_ERR_HWPOISON) {
                kvm_send_hwpoison_signal(hva, vma_shift);
                return 0;
        }
        if (is_error_noslot_pfn(pfn))
                return -EFAULT;

        if (kvm_is_device_pfn(pfn)) {
                /*
                 * If the page was identified as device early by looking at
                 * the VMA flags, vma_pagesize is already representing the
                 * largest quantity we can map.  If instead it was mapped
                 * via gfn_to_pfn_prot(), vma_pagesize is set to PAGE_SIZE
                 * and must not be upgraded.
                 *
                 * In both cases, we don't let transparent_hugepage_adjust()
                 * change things at the last minute.
                 */
                device = true;
        } else if (logging_active && !write_fault) {
                /*
                 * Only actually map the page as writable if this was a write
                 * fault.
                 */
                writable = false;
        }

        if (exec_fault && device)
                return -ENOEXEC;

        spin_lock(&kvm->mmu_lock);
        pgt = vcpu->arch.hw_mmu->pgt;
        if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;

        /*
         * If we are not forced to use page mapping, check if we are
         * backed by a THP and thus use block mapping if possible.
         */
        if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
                if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE)
                        vma_pagesize = fault_granule;
                else
                        vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
                                                                   hva, &pfn,
                                                                   &fault_ipa);
        }

        if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
                /* Check the VMM hasn't introduced a new VM_SHARED VMA */
                if (!shared)
                        ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
                else
                        ret = -EFAULT;
                if (ret)
                        goto out_unlock;
        }

        if (writable)
                prot |= KVM_PGTABLE_PROT_W;

        if (exec_fault)
                prot |= KVM_PGTABLE_PROT_X;

        if (device)
                prot |= KVM_PGTABLE_PROT_DEVICE;
        else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
                prot |= KVM_PGTABLE_PROT_X;

        /*
         * Under the premise of getting a FSC_PERM fault, we just need to relax
         * permissions only if vma_pagesize equals fault_granule. Otherwise,
         * kvm_pgtable_stage2_map() should be called to change block size.
         */
        if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
                ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
        } else {
                ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
                                             __pfn_to_phys(pfn), prot,
                                             memcache);
        }

        /* Mark the page dirty only if the fault is handled successfully */
        if (writable && !ret) {
                kvm_set_pfn_dirty(pfn);
                mark_page_dirty_in_slot(kvm, memslot, gfn);
        }

out_unlock:
        spin_unlock(&kvm->mmu_lock);
        kvm_set_pfn_accessed(pfn);
        kvm_release_pfn_clean(pfn);
        return ret != -EAGAIN ? ret : 0;
}

/* Resolve the access fault by making the page young again. */
static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
        pte_t pte;
        kvm_pte_t kpte;
        struct kvm_s2_mmu *mmu;

        trace_kvm_access_fault(fault_ipa);

        spin_lock(&vcpu->kvm->mmu_lock);
        mmu = vcpu->arch.hw_mmu;
        kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
        spin_unlock(&vcpu->kvm->mmu_lock);

        pte = __pte(kpte);
        if (pte_valid(pte))
                kvm_set_pfn_accessed(pte_pfn(pte));
}
1298
Christoffer Dall94f8e642013-01-20 18:28:12 -05001299/**
1300 * kvm_handle_guest_abort - handles all 2nd stage aborts
1301 * @vcpu: the VCPU pointer
Christoffer Dall94f8e642013-01-20 18:28:12 -05001302 *
 1303 * Any abort that reaches the host is almost guaranteed to be caused by a
 1304 * missing stage-2 translation table entry: either the guest simply needs
 1305 * more memory and we must allocate an appropriate page, or the guest tried
 1306 * to access I/O memory, which is emulated by user space. The distinction is
 1307 * based on the IPA causing the fault and on whether this memory region has
 1308 * been registered as standard RAM by user space.
1309 */
Tianjia Zhang74cc7e02020-06-23 21:14:15 +08001310int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001311{
Christoffer Dall94f8e642013-01-20 18:28:12 -05001312 unsigned long fault_status;
1313 phys_addr_t fault_ipa;
1314 struct kvm_memory_slot *memslot;
Christoffer Dall98047882014-08-19 12:18:04 +02001315 unsigned long hva;
1316 bool is_iabt, write_fault, writable;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001317 gfn_t gfn;
1318 int ret, idx;
1319
Tyler Baicar621f48e2017-06-21 12:17:14 -06001320 fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1321
1322 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
James Morsebb428922017-07-18 13:37:41 +01001323 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
Tyler Baicar621f48e2017-06-21 12:17:14 -06001324
James Morsebb428922017-07-18 13:37:41 +01001325 /* Synchronous External Abort? */
Will Deaconc9a636f2020-07-29 11:28:18 +01001326 if (kvm_vcpu_abt_issea(vcpu)) {
James Morsebb428922017-07-18 13:37:41 +01001327 /*
 1328		 * For RAS the host kernel may handle this abort; if it
 1329		 * does, there is no need to pass the error into the guest.
1330 */
Will Deacon84b951a2020-07-29 11:28:19 +01001331 if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
James Morsebb428922017-07-18 13:37:41 +01001332 kvm_inject_vabt(vcpu);
Will Deacon84b951a2020-07-29 11:28:19 +01001333
1334 return 1;
Marc Zyngier40557102016-09-06 14:02:15 +01001335 }
1336
Gavin Shan3a949f42020-06-30 11:57:05 +10001337 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
Marc Zyngier7393b592012-09-17 19:27:09 +01001338 kvm_vcpu_get_hfar(vcpu), fault_ipa);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001339
 1340	/* Check that the stage-2 fault is a translation, permission or access fault */
Marc Zyngier35307b92015-03-12 18:16:51 +00001341 if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
1342 fault_status != FSC_ACCESS) {
Christoffer Dall0496daa52014-09-26 12:29:34 +02001343 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1344 kvm_vcpu_trap_get_class(vcpu),
1345 (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
Gavin Shan3a949f42020-06-30 11:57:05 +10001346 (unsigned long)kvm_vcpu_get_esr(vcpu));
Christoffer Dall94f8e642013-01-20 18:28:12 -05001347 return -EFAULT;
1348 }
1349
1350 idx = srcu_read_lock(&vcpu->kvm->srcu);
1351
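	/*
	 * Translate the faulting IPA to a guest frame number and look up the
	 * memslot and userspace mapping that back it. If there is no valid
	 * mapping (or the guest writes to a read-only one), the fault is
	 * handled below as MMIO or by injecting an abort into the guest.
	 */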
1352 gfn = fault_ipa >> PAGE_SHIFT;
Christoffer Dall98047882014-08-19 12:18:04 +02001353 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1354 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
Ard Biesheuvela7d079c2014-09-09 11:27:09 +01001355 write_fault = kvm_is_write_fault(vcpu);
Christoffer Dall98047882014-08-19 12:18:04 +02001356 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
Will Deacon022c8322020-07-29 11:28:21 +01001357 /*
1358 * The guest has put either its instructions or its page-tables
1359 * somewhere it shouldn't have. Userspace won't be able to do
1360 * anything about this (there's no syndrome for a start), so
1361 * re-inject the abort back into the guest.
1362 */
Christoffer Dall94f8e642013-01-20 18:28:12 -05001363 if (is_iabt) {
Marc Zyngier6d674e22019-12-11 16:56:48 +00001364 ret = -ENOEXEC;
1365 goto out;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001366 }
1367
Marc Zyngierc4ad98e2020-09-15 11:42:17 +01001368 if (kvm_vcpu_abt_iss1tw(vcpu)) {
Will Deacon022c8322020-07-29 11:28:21 +01001369 kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1370 ret = 1;
1371 goto out_unlock;
1372 }
1373
Marc Zyngiercfe39502012-12-12 14:42:09 +00001374 /*
Marc Zyngier57c841f2016-01-29 15:01:28 +00001375 * Check for a cache maintenance operation. Since we
 1376		 * ended up here, we know it is outside of any memory
1377 * slot. But we can't find out if that is for a device,
1378 * or if the guest is just being stupid. The only thing
1379 * we know for sure is that this range cannot be cached.
1380 *
1381 * So let's assume that the guest is just being
1382 * cautious, and skip the instruction.
1383 */
Will Deacon54dc0d22020-07-29 11:28:20 +01001384 if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
Marc Zyngiercdb5e022020-10-14 09:29:27 +01001385 kvm_incr_pc(vcpu);
Marc Zyngier57c841f2016-01-29 15:01:28 +00001386 ret = 1;
1387 goto out_unlock;
1388 }
1389
1390 /*
Marc Zyngiercfe39502012-12-12 14:42:09 +00001391 * The IPA is reported as [MAX:12], so we need to
1392 * complement it with the bottom 12 bits from the
1393 * faulting VA. This is always 12 bits, irrespective
1394 * of the page size.
1395 */
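		/* For example, an IPA of 0x80000000 with HFAR[11:0] = 0xa40 yields 0x80000a40. */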
1396 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
Tianjia Zhang74cc7e02020-06-23 21:14:15 +08001397 ret = io_mem_abort(vcpu, fault_ipa);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001398 goto out_unlock;
1399 }
1400
Christoffer Dallc3058d52014-10-10 12:14:29 +02001401 /* Userspace should not be able to register out-of-bounds IPAs */
Suzuki K Poulosee55cac52018-09-26 17:32:44 +01001402 VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
Christoffer Dallc3058d52014-10-10 12:14:29 +02001403
Marc Zyngieraeda9132015-03-12 18:16:52 +00001404 if (fault_status == FSC_ACCESS) {
1405 handle_access_fault(vcpu, fault_ipa);
1406 ret = 1;
1407 goto out_unlock;
1408 }
1409
Christoffer Dall98047882014-08-19 12:18:04 +02001410 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001411 if (ret == 0)
1412 ret = 1;
Marc Zyngier6d674e22019-12-11 16:56:48 +00001413out:
1414 if (ret == -ENOEXEC) {
1415 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1416 ret = 1;
1417 }
Christoffer Dall94f8e642013-01-20 18:28:12 -05001418out_unlock:
1419 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1420 return ret;
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001421}
1422
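/*
 * MMU notifier callback: tear down the stage-2 mappings covering @range.
 * Returning false tells the generic code that no extra TLB flush is needed,
 * since __unmap_stage2_range() already takes care of TLB invalidation for
 * the unmapped range.
 */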
Sean Christophersoncd4c7182021-04-01 17:56:51 -07001423bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
Christoffer Dalld5d81842013-01-20 18:28:07 -05001424{
Will Deacon063deeb2020-09-11 14:25:26 +01001425 if (!kvm->arch.mmu.pgt)
kernel test robotfcb82832021-04-27 06:33:57 +08001426 return false;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001427
Sean Christophersoncd4c7182021-04-01 17:56:51 -07001428 __unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
1429 (range->end - range->start) << PAGE_SHIFT,
1430 range->may_block);
1431
kernel test robotfcb82832021-04-27 06:33:57 +08001432 return false;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001433}
1434
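/*
 * MMU notifier ->change_pte() callback: the host PTE backing this gfn has
 * changed (typically after CoW), so sanitise any MTE tags and install a
 * new, initially read-only, stage-2 mapping for the new pfn.
 */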
Sean Christophersoncd4c7182021-04-01 17:56:51 -07001435bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
Christoffer Dalld5d81842013-01-20 18:28:07 -05001436{
Sean Christophersoncd4c7182021-04-01 17:56:51 -07001437 kvm_pfn_t pfn = pte_pfn(range->pte);
Steven Priceea7fc1b2021-06-21 12:17:12 +01001438 int ret;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001439
Will Deacone9edb172020-09-11 14:25:16 +01001440 if (!kvm->arch.mmu.pgt)
kernel test robotfcb82832021-04-27 06:33:57 +08001441 return false;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001442
Sean Christophersoncd4c7182021-04-01 17:56:51 -07001443 WARN_ON(range->end - range->start != 1);
1444
Steven Priceea7fc1b2021-06-21 12:17:12 +01001445 ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE);
1446 if (ret)
1447 return false;
1448
Marc Zyngier694556d2018-08-23 09:58:27 +01001449 /*
Yanan Wang25aa2862021-06-17 18:58:24 +08001450 * We've moved a page around, probably through CoW, so let's treat
 1451	 * it just like a translation fault; the map handler will then clean
1452 * the cache to the PoC.
1453 *
Sean Christophersoncd4c7182021-04-01 17:56:51 -07001454 * The MMU notifiers will have unmapped a huge PMD before calling
1455 * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and
1456 * therefore we never need to clear out a huge PMD through this
1457 * calling path and a memcache is not required.
1458 */
1459 kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
1460 PAGE_SIZE, __pfn_to_phys(pfn),
1461 KVM_PGTABLE_PROT_R, NULL);
1462
kernel test robotfcb82832021-04-27 06:33:57 +08001463 return false;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001464}
1465
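/*
 * MMU notifier aging callback: clear the access flag on the stage-2 entry
 * so that the next access faults and marks the page young again. Returns
 * whether the old entry was valid and young.
 */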
Sean Christophersoncd4c7182021-04-01 17:56:51 -07001466bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
Marc Zyngier35307b92015-03-12 18:16:51 +00001467{
Sean Christophersoncd4c7182021-04-01 17:56:51 -07001468 u64 size = (range->end - range->start) << PAGE_SHIFT;
Will Deaconee8efad2020-09-11 14:25:19 +01001469 kvm_pte_t kpte;
Sean Christophersoncd4c7182021-04-01 17:56:51 -07001470 pte_t pte;
1471
1472 if (!kvm->arch.mmu.pgt)
kernel test robotfcb82832021-04-27 06:33:57 +08001473 return false;
Marc Zyngier35307b92015-03-12 18:16:51 +00001474
Punit Agrawal35a63962018-12-11 17:10:40 +00001475 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
Sean Christophersoncd4c7182021-04-01 17:56:51 -07001476
1477 kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
1478 range->start << PAGE_SHIFT);
Will Deaconee8efad2020-09-11 14:25:19 +01001479 pte = __pte(kpte);
1480 return pte_valid(pte) && pte_young(pte);
Marc Zyngier35307b92015-03-12 18:16:51 +00001481}
1482
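/* As above, but only test the access flag without clearing it. */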
Sean Christophersoncd4c7182021-04-01 17:56:51 -07001483bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
Marc Zyngier35307b92015-03-12 18:16:51 +00001484{
Will Deacon063deeb2020-09-11 14:25:26 +01001485 if (!kvm->arch.mmu.pgt)
kernel test robotfcb82832021-04-27 06:33:57 +08001486 return false;
Sean Christopherson501b9182021-03-25 19:19:48 -07001487
Sean Christophersoncd4c7182021-04-01 17:56:51 -07001488 return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
1489 range->start << PAGE_SHIFT);
Marc Zyngier35307b92015-03-12 18:16:51 +00001490}
1491
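/*
 * Return the physical address of the hyp stage-1 page-table root; this is
 * what gets programmed into TTBR0_EL2 when the hypervisor is installed.
 */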
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001492phys_addr_t kvm_mmu_get_httbr(void)
1493{
Will Deacon0f9d09b2020-09-11 14:25:12 +01001494 return __pa(hyp_pgtable->pgd);
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001495}
1496
Marc Zyngier5a677ce2013-04-12 19:12:06 +01001497phys_addr_t kvm_get_idmap_vector(void)
1498{
1499 return hyp_idmap_vector;
1500}
1501
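/*
 * Identity-map the hyp idmap text (VA == PA) in the hyp stage-1 tables so
 * that the init code keeps executing from a valid mapping while the EL2
 * MMU is being turned on or off.
 */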
Will Deacon0f9d09b2020-09-11 14:25:12 +01001502static int kvm_map_idmap_text(void)
Marc Zyngier0535a3e2016-06-30 18:40:43 +01001503{
Will Deacon0f9d09b2020-09-11 14:25:12 +01001504 unsigned long size = hyp_idmap_end - hyp_idmap_start;
1505 int err = __create_hyp_mappings(hyp_idmap_start, size, hyp_idmap_start,
1506 PAGE_HYP_EXEC);
Marc Zyngier0535a3e2016-06-30 18:40:43 +01001507 if (err)
1508 kvm_err("Failed to idmap %lx-%lx\n",
1509 hyp_idmap_start, hyp_idmap_end);
1510
1511 return err;
1512}
1513
Quentin Perret7aef0cb2021-03-19 10:01:14 +00001514static void *kvm_hyp_zalloc_page(void *arg)
1515{
1516 return (void *)get_zeroed_page(GFP_KERNEL);
1517}
1518
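/*
 * Memory-management callbacks used by the hyp page-table code: table pages
 * come from the normal page allocator, and virt/phys conversions go through
 * the host helpers (kvm_host_va/kvm_host_pa, presumably thin wrappers around
 * the linear-map __va/__pa).
 */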
1519static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
1520 .zalloc_page = kvm_hyp_zalloc_page,
1521 .get_page = kvm_host_get_page,
1522 .put_page = kvm_host_put_page,
1523 .phys_to_virt = kvm_host_va,
1524 .virt_to_phys = kvm_host_pa,
1525};
1526
Quentin Perretbfa79a82021-03-19 10:01:26 +00001527int kvm_mmu_init(u32 *hyp_va_bits)
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001528{
Marc Zyngier2fb41052013-04-12 19:12:03 +01001529 int err;
1530
Andrew Scull0a787912020-05-19 11:40:36 +01001531 hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
Marc Zyngier46fef152018-03-12 14:25:10 +00001532 hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
Andrew Scull0a787912020-05-19 11:40:36 +01001533 hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end);
Marc Zyngier46fef152018-03-12 14:25:10 +00001534 hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
Andrew Scull0a787912020-05-19 11:40:36 +01001535 hyp_idmap_vector = __pa_symbol(__kvm_hyp_init);
Marc Zyngier5a677ce2013-04-12 19:12:06 +01001536
Ard Biesheuvel06f75a12015-03-19 16:42:26 +00001537 /*
1538 * We rely on the linker script to ensure at build time that the HYP
1539 * init code does not cross a page boundary.
1540 */
1541 BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
Marc Zyngier5a677ce2013-04-12 19:12:06 +01001542
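	/*
	 * T0SZ is the number of most-significant VA bits that are not
	 * translated, so the usable hyp VA width is 64 - T0SZ. Reuse the
	 * idmap's T0SZ so the hyp stage-1 tables cover the same range.
	 */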
Quentin Perretbfa79a82021-03-19 10:01:26 +00001543 *hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
1544 kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
Marc Zyngierb4ef0492017-12-03 20:04:51 +00001545 kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
1546 kvm_debug("HYP VA range: %lx:%lx\n",
1547 kern_hyp_va(PAGE_OFFSET),
1548 kern_hyp_va((unsigned long)high_memory - 1));
Marc Zyngiereac378a2016-06-30 18:40:50 +01001549
Marc Zyngier6c41a412016-06-30 18:40:51 +01001550 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
Marc Zyngiered57cac2017-12-03 18:22:49 +00001551 hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) &&
Marc Zyngierd2896d42016-08-22 09:01:17 +01001552 hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
Marc Zyngiereac378a2016-06-30 18:40:50 +01001553 /*
 1554		 * The idmap page intersects with the HYP VA space;
 1555		 * it is not safe to continue further.
1556 */
1557 kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
1558 err = -EINVAL;
1559 goto out;
1560 }
1561
Will Deacon0f9d09b2020-09-11 14:25:12 +01001562 hyp_pgtable = kzalloc(sizeof(*hyp_pgtable), GFP_KERNEL);
1563 if (!hyp_pgtable) {
1564 kvm_err("Hyp mode page-table not allocated\n");
Marc Zyngier2fb41052013-04-12 19:12:03 +01001565 err = -ENOMEM;
1566 goto out;
1567 }
1568
Quentin Perretbfa79a82021-03-19 10:01:26 +00001569 err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
Will Deacon0f9d09b2020-09-11 14:25:12 +01001570 if (err)
1571 goto out_free_pgtable;
Marc Zyngier0535a3e2016-06-30 18:40:43 +01001572
Will Deacon0f9d09b2020-09-11 14:25:12 +01001573 err = kvm_map_idmap_text();
1574 if (err)
1575 goto out_destroy_pgtable;
Marc Zyngier5a677ce2013-04-12 19:12:06 +01001576
Marc Zyngiere3f019b2017-12-04 17:04:38 +00001577 io_map_base = hyp_idmap_start;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001578 return 0;
Will Deacon0f9d09b2020-09-11 14:25:12 +01001579
1580out_destroy_pgtable:
1581 kvm_pgtable_hyp_destroy(hyp_pgtable);
1582out_free_pgtable:
1583 kfree(hyp_pgtable);
1584 hyp_pgtable = NULL;
Marc Zyngier2fb41052013-04-12 19:12:03 +01001585out:
Marc Zyngier2fb41052013-04-12 19:12:03 +01001586 return err;
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001587}
Eric Augerdf6ce242014-06-06 11:10:23 +02001588
1589void kvm_arch_commit_memory_region(struct kvm *kvm,
Sean Christopherson9d4c1972020-02-18 13:07:24 -08001590 struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02001591 const struct kvm_memory_slot *new,
Eric Augerdf6ce242014-06-06 11:10:23 +02001592 enum kvm_mr_change change)
1593{
Mario Smarduchc6473552015-01-15 15:58:56 -08001594 /*
 1595	 * At this point the memslot has been committed and a
Fuad Tabba656012c2020-04-01 15:03:10 +01001596	 * dirty_bitmap[] has been allocated; dirty pages will be tracked
Mario Smarduchc6473552015-01-15 15:58:56 -08001597	 * while the memory slot is write-protected.
1598 */
Sean Christopherson509c5942021-12-06 20:54:12 +01001599 if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
Keqian Zhuc8626262020-04-13 20:20:23 +08001600 /*
 1601		 * With initial-all-set, we don't need to write-protect
 1602		 * any pages here because they are all reported as dirty.
 1603		 * Huge pages and normal pages will be write-protected gradually.
1604 */
1605 if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
Sean Christopherson509c5942021-12-06 20:54:12 +01001606 kvm_mmu_wp_memory_region(kvm, new->id);
Keqian Zhuc8626262020-04-13 20:20:23 +08001607 }
1608 }
Eric Augerdf6ce242014-06-06 11:10:23 +02001609}
1610
1611int kvm_arch_prepare_memory_region(struct kvm *kvm,
Sean Christopherson537a17b2021-12-06 20:54:11 +01001612 const struct kvm_memory_slot *old,
1613 struct kvm_memory_slot *new,
Eric Augerdf6ce242014-06-06 11:10:23 +02001614 enum kvm_mr_change change)
1615{
Sean Christopherson509c5942021-12-06 20:54:12 +01001616 hva_t hva, reg_end;
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001617 int ret = 0;
1618
Mario Smarduch15a49a42015-01-15 15:58:58 -08001619 if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
1620 change != KVM_MR_FLAGS_ONLY)
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001621 return 0;
1622
1623 /*
Christoffer Dallc3058d52014-10-10 12:14:29 +02001624	 * Prevent userspace from creating a memory region outside of the
 1625	 * IPA space addressable by the guest.
1626 */
Sean Christopherson537a17b2021-12-06 20:54:11 +01001627 if ((new->base_gfn + new->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
Christoffer Dallc3058d52014-10-10 12:14:29 +02001628 return -EFAULT;
1629
Sean Christopherson509c5942021-12-06 20:54:12 +01001630 hva = new->userspace_addr;
1631 reg_end = hva + (new->npages << PAGE_SHIFT);
1632
Michel Lespinasse89154dd2020-06-08 21:33:29 -07001633 mmap_read_lock(current->mm);
Christoffer Dallc3058d52014-10-10 12:14:29 +02001634 /*
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001635 * A memory region could potentially cover multiple VMAs, and any holes
Keqian Zhufd6f17b2021-05-07 19:03:21 +08001636 * between them, so iterate over all of them.
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001637 *
1638 * +--------------------------------------------+
1639 * +---------------+----------------+ +----------------+
1640 * | : VMA 1 | VMA 2 | | VMA 3 : |
1641 * +---------------+----------------+ +----------------+
1642 * | memory region |
1643 * +--------------------------------------------+
1644 */
1645 do {
Gavin Shanc728fd42021-03-16 12:11:25 +08001646 struct vm_area_struct *vma;
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001647
Gavin Shanc728fd42021-03-16 12:11:25 +08001648 vma = find_vma_intersection(current->mm, hva, reg_end);
1649 if (!vma)
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001650 break;
1651
1652 /*
Steven Priceea7fc1b2021-06-21 12:17:12 +01001653 * VM_SHARED mappings are not allowed with MTE to avoid races
 1654		 * when updating the PG_mte_tagged page flag; see
 1655		 * sanitise_mte_tags() for more details.
1656 */
Quentin Perret6e6a8ef082021-10-05 13:20:31 +01001657 if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
1658 ret = -EINVAL;
1659 break;
1660 }
Steven Priceea7fc1b2021-06-21 12:17:12 +01001661
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001662 if (vma->vm_flags & VM_PFNMAP) {
Mario Smarduch15a49a42015-01-15 15:58:58 -08001663 /* IO region dirty page logging not allowed */
Sean Christopherson537a17b2021-12-06 20:54:11 +01001664 if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
Marc Zyngier72f31042017-03-16 18:20:50 +00001665 ret = -EINVAL;
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001666 break;
Keqian Zhufd6f17b2021-05-07 19:03:21 +08001667 }
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001668 }
Keqian Zhufd6f17b2021-05-07 19:03:21 +08001669 hva = min(reg_end, vma->vm_end);
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001670 } while (hva < reg_end);
1671
Michel Lespinasse89154dd2020-06-08 21:33:29 -07001672 mmap_read_unlock(current->mm);
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001673 return ret;
Eric Augerdf6ce242014-06-06 11:10:23 +02001674}
1675
Sean Christophersone96c81e2020-02-18 13:07:27 -08001676void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
Eric Augerdf6ce242014-06-06 11:10:23 +02001677{
1678}
1679
Sean Christopherson15248252019-02-05 12:54:17 -08001680void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
Eric Augerdf6ce242014-06-06 11:10:23 +02001681{
1682}
1683
1684void kvm_arch_flush_shadow_all(struct kvm *kvm)
1685{
Christoffer Dalla0e50aa2019-01-04 21:09:05 +01001686 kvm_free_stage2_pgd(&kvm->arch.mmu);
Eric Augerdf6ce242014-06-06 11:10:23 +02001687}
1688
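/*
 * A memslot's mappings are going away: tear down the stage-2 mappings that
 * cover it. The underlying pages belong to userspace and are not freed here.
 */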
1689void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1690 struct kvm_memory_slot *slot)
1691{
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001692 gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
1693 phys_addr_t size = slot->npages << PAGE_SHIFT;
1694
1695 spin_lock(&kvm->mmu_lock);
Christoffer Dalla0e50aa2019-01-04 21:09:05 +01001696 unmap_stage2_range(&kvm->arch.mmu, gpa, size);
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001697 spin_unlock(&kvm->mmu_lock);
Eric Augerdf6ce242014-06-06 11:10:23 +02001698}
Marc Zyngier3c1e7162014-12-19 16:05:31 +00001699
1700/*
1701 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
1702 *
1703 * Main problems:
1704 * - S/W ops are local to a CPU (not broadcast)
1705 * - We have line migration behind our back (speculation)
1706 * - System caches don't support S/W at all (damn!)
1707 *
1708 * In the face of the above, the best we can do is to try and convert
1709 * S/W ops to VA ops. Because the guest is not allowed to infer the
1710 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
1711 * which is a rather good thing for us.
1712 *
1713 * Also, it is only used when turning caches on/off ("The expected
1714 * usage of the cache maintenance instructions that operate by set/way
1715 * is associated with the cache maintenance instructions associated
1716 * with the powerdown and powerup of caches, if this is required by
1717 * the implementation.").
1718 *
1719 * We use the following policy:
1720 *
1721 * - If we trap a S/W operation, we enable VM trapping to detect
1722 * caches being turned on/off, and do a full clean.
1723 *
 1724 * - We flush the caches both when they are turned on and off.
1725 *
1726 * - Once the caches are enabled, we stop trapping VM ops.
1727 */
1728void kvm_set_way_flush(struct kvm_vcpu *vcpu)
1729{
Christoffer Dall3df59d82017-08-03 12:09:05 +02001730 unsigned long hcr = *vcpu_hcr(vcpu);
Marc Zyngier3c1e7162014-12-19 16:05:31 +00001731
1732 /*
 1733	 * If this is the first time we see a S/W operation
 1734	 * (i.e. HCR_TVM not set), flush the whole of guest memory
 1735	 * and enable trapping of the VM registers.
1736 *
1737 * Otherwise, rely on the VM trapping to wait for the MMU +
1738 * Caches to be turned off. At that point, we'll be able to
1739 * clean the caches again.
1740 */
1741 if (!(hcr & HCR_TVM)) {
1742 trace_kvm_set_way_flush(*vcpu_pc(vcpu),
1743 vcpu_has_cache_enabled(vcpu));
1744 stage2_flush_vm(vcpu->kvm);
Christoffer Dall3df59d82017-08-03 12:09:05 +02001745 *vcpu_hcr(vcpu) = hcr | HCR_TVM;
Marc Zyngier3c1e7162014-12-19 16:05:31 +00001746 }
1747}
1748
1749void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
1750{
1751 bool now_enabled = vcpu_has_cache_enabled(vcpu);
1752
1753 /*
 1754	 * If switching the MMU+caches on, we need to invalidate the caches.
 1755	 * If switching them off, we need to clean the caches.
 1756	 * Clean + invalidate always does the trick.
1757 */
1758 if (now_enabled != was_enabled)
1759 stage2_flush_vm(vcpu->kvm);
1760
1761 /* Caches are now on, stop trapping VM ops (until a S/W op) */
1762 if (now_enabled)
Christoffer Dall3df59d82017-08-03 12:09:05 +02001763 *vcpu_hcr(vcpu) &= ~HCR_TVM;
Marc Zyngier3c1e7162014-12-19 16:05:31 +00001764
1765 trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
1766}