// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/sched/signal.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/virt.h>

#include "trace.h"

static struct kvm_pgtable *hyp_pgtable;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

static unsigned long io_map_base;

/*
 * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
 * we may see kernel panics with CONFIG_DETECT_HUNG_TASK,
 * CONFIG_LOCKUP_DETECTOR or CONFIG_LOCKDEP. Additionally, holding the lock
 * for too long will also starve other vCPUs. We also have to make sure that
 * the page tables are not freed while we release the lock.
 */
static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
                              phys_addr_t end,
                              int (*fn)(struct kvm_pgtable *, u64, u64),
                              bool resched)
{
        int ret;
        u64 next;

        do {
                struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
                if (!pgt)
                        return -EINVAL;

                next = stage2_pgd_addr_end(kvm, addr, end);
                ret = fn(pgt, addr, next - addr);
                if (ret)
                        break;

                if (resched && next != end)
                        cond_resched_lock(&kvm->mmu_lock);
        } while (addr = next, addr != end);

        return ret;
}

#define stage2_apply_range_resched(kvm, addr, end, fn)                  \
        stage2_apply_range(kvm, addr, end, fn, true)

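/*
 * A memslot is being dirty-logged if userspace attached a dirty bitmap to it
 * and the slot is writable; read-only slots never need write protection.
 */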
static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
        return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}

/**
 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
 * @kvm: pointer to kvm structure.
 *
 * Interface to HYP function to flush all VM TLB entries
 */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        ++kvm->stat.generic.remote_tlb_flush_requests;
        kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
}

static bool kvm_is_device_pfn(unsigned long pfn)
{
        return !pfn_is_map_memory(pfn);
}

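/*
 * Host-side callbacks handed to the generic page-table code through
 * struct kvm_pgtable_mm_ops: allocation from the per-vCPU memory cache,
 * refcounting of table pages, and PA<->VA conversion via the linear map.
 */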
static void *stage2_memcache_zalloc_page(void *arg)
{
        struct kvm_mmu_memory_cache *mc = arg;

        /* Allocated with __GFP_ZERO, so no need to zero */
        return kvm_mmu_memory_cache_alloc(mc);
}

static void *kvm_host_zalloc_pages_exact(size_t size)
{
        return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
}

static void kvm_host_get_page(void *addr)
{
        get_page(virt_to_page(addr));
}

static void kvm_host_put_page(void *addr)
{
        put_page(virt_to_page(addr));
}

static int kvm_host_page_count(void *addr)
{
        return page_count(virt_to_page(addr));
}

static phys_addr_t kvm_host_pa(void *addr)
{
        return __pa(addr);
}

static void *kvm_host_va(phys_addr_t phys)
{
        return __va(phys);
}

static void clean_dcache_guest_page(void *va, size_t size)
{
        __clean_dcache_guest_page(va, size);
}

static void invalidate_icache_guest_page(void *va, size_t size)
{
        __invalidate_icache_guest_page(va, size);
}

/*
 * Unmapping vs dcache management:
 *
 * If a guest maps certain memory pages as uncached, all writes will
 * bypass the data cache and go directly to RAM.  However, the CPUs
 * can still speculate reads (not writes) and fill cache lines with
 * data.
 *
 * Those cache lines will be *clean* cache lines though, so a
 * clean+invalidate operation is equivalent to an invalidate
 * operation, because no cache lines are marked dirty.
 *
 * Those clean cache lines could be filled prior to an uncached write
 * by the guest, and the cache coherent IO subsystem would therefore
 * end up writing old data to disk.
 *
 * This is why right after unmapping a page/section and invalidating
 * the corresponding TLBs, we flush to make sure the IO subsystem will
 * never hit in the cache.
 *
 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
 * we then fully enforce cacheability of RAM, no matter what the guest
 * does.
 */
/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @mmu:   The KVM stage-2 MMU pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 * @may_block: Whether or not we are permitted to block
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
                                 bool may_block)
{
        struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
        phys_addr_t end = start + size;

        assert_spin_locked(&kvm->mmu_lock);
        WARN_ON(size & ~PAGE_MASK);
        WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap,
                                   may_block));
}

static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
{
        __unmap_stage2_range(mmu, start, size, true);
}

static void stage2_flush_memslot(struct kvm *kvm,
                                 struct kvm_memory_slot *memslot)
{
        phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
        phys_addr_t end = addr + PAGE_SIZE * memslot->npages;

        stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_flush);
}

/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
static void stage2_flush_vm(struct kvm *kvm)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int idx, bkt;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, bkt, slots)
                stage2_flush_memslot(kvm, memslot);

        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 */
void free_hyp_pgds(void)
{
        mutex_lock(&kvm_hyp_pgd_mutex);
        if (hyp_pgtable) {
                kvm_pgtable_hyp_destroy(hyp_pgtable);
                kfree(hyp_pgtable);
                hyp_pgtable = NULL;
        }
        mutex_unlock(&kvm_hyp_pgd_mutex);
}

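/*
 * Returns true while the host kernel still owns and edits the hyp stage-1
 * page tables itself (nVHE before pKVM is finalised). With VHE, or once
 * protected mode is initialised, hyp mappings are no longer managed here.
 */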
static bool kvm_host_owns_hyp_mappings(void)
{
        if (is_kernel_in_hyp_mode())
                return false;

        if (static_branch_likely(&kvm_protected_mode_initialized))
                return false;

        /*
         * This can happen at boot time when __create_hyp_mappings() is called
         * after the hyp protection has been enabled, but the static key has
         * not been flipped yet.
         */
        if (!hyp_pgtable && is_protected_kvm_enabled())
                return false;

        WARN_ON(!hyp_pgtable);

        return true;
}

static int __create_hyp_mappings(unsigned long start, unsigned long size,
                                 unsigned long phys, enum kvm_pgtable_prot prot)
{
        int err;

        if (WARN_ON(!kvm_host_owns_hyp_mappings()))
                return -EINVAL;

        mutex_lock(&kvm_hyp_pgd_mutex);
        err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
        mutex_unlock(&kvm_hyp_pgd_mutex);

        return err;
}

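/*
 * Translate a kernel virtual address to a physical address, handling both
 * linear-map addresses (__pa()) and vmalloc addresses (page-table lookup).
 */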
static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
        if (!is_vmalloc_addr(kaddr)) {
                BUG_ON(!virt_addr_valid(kaddr));
                return __pa(kaddr);
        } else {
                return page_to_phys(vmalloc_to_page(kaddr)) +
                       offset_in_page(kaddr);
        }
}

struct hyp_shared_pfn {
        u64 pfn;
        int count;
        struct rb_node node;
};

static DEFINE_MUTEX(hyp_shared_pfns_lock);
static struct rb_root hyp_shared_pfns = RB_ROOT;

static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
                                              struct rb_node **parent)
{
        struct hyp_shared_pfn *this;

        *node = &hyp_shared_pfns.rb_node;
        *parent = NULL;
        while (**node) {
                this = container_of(**node, struct hyp_shared_pfn, node);
                *parent = **node;
                if (this->pfn < pfn)
                        *node = &((**node)->rb_left);
                else if (this->pfn > pfn)
                        *node = &((**node)->rb_right);
                else
                        return this;
        }

        return NULL;
}

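/*
 * Share a page with the hypervisor. A refcount is kept in the rb-tree so
 * that a pfn shared by several users is only handed to hyp once.
 */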
static int share_pfn_hyp(u64 pfn)
{
        struct rb_node **node, *parent;
        struct hyp_shared_pfn *this;
        int ret = 0;

        mutex_lock(&hyp_shared_pfns_lock);
        this = find_shared_pfn(pfn, &node, &parent);
        if (this) {
                this->count++;
                goto unlock;
        }

        this = kzalloc(sizeof(*this), GFP_KERNEL);
        if (!this) {
                ret = -ENOMEM;
                goto unlock;
        }

        this->pfn = pfn;
        this->count = 1;
        rb_link_node(&this->node, parent, node);
        rb_insert_color(&this->node, &hyp_shared_pfns);
        ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1);
unlock:
        mutex_unlock(&hyp_shared_pfns_lock);

        return ret;
}

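/*
 * Drop one reference on a page previously shared with the hypervisor, and
 * only issue the unshare hypercall once the last user has gone away.
 */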
static int unshare_pfn_hyp(u64 pfn)
{
        struct rb_node **node, *parent;
        struct hyp_shared_pfn *this;
        int ret = 0;

        mutex_lock(&hyp_shared_pfns_lock);
        this = find_shared_pfn(pfn, &node, &parent);
        if (WARN_ON(!this)) {
                ret = -ENOENT;
                goto unlock;
        }

        this->count--;
        if (this->count)
                goto unlock;

        rb_erase(&this->node, &hyp_shared_pfns);
        kfree(this);
        ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1);
unlock:
        mutex_unlock(&hyp_shared_pfns_lock);

        return ret;
}

int kvm_share_hyp(void *from, void *to)
{
        phys_addr_t start, end, cur;
        u64 pfn;
        int ret;

        if (is_kernel_in_hyp_mode())
                return 0;

        /*
         * The share hcall maps things in the 'fixed-offset' region of the hyp
         * VA space, so we can only share physically contiguous data-structures
         * for now.
         */
        if (is_vmalloc_or_module_addr(from) || is_vmalloc_or_module_addr(to))
                return -EINVAL;

        if (kvm_host_owns_hyp_mappings())
                return create_hyp_mappings(from, to, PAGE_HYP);

        start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
        end = PAGE_ALIGN(__pa(to));
        for (cur = start; cur < end; cur += PAGE_SIZE) {
                pfn = __phys_to_pfn(cur);
                ret = share_pfn_hyp(pfn);
                if (ret)
                        return ret;
        }

        return 0;
}

void kvm_unshare_hyp(void *from, void *to)
{
        phys_addr_t start, end, cur;
        u64 pfn;

        if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from)
                return;

        start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
        end = PAGE_ALIGN(__pa(to));
        for (cur = start; cur < end; cur += PAGE_SIZE) {
                pfn = __phys_to_pfn(cur);
                WARN_ON(unshare_pfn_hyp(pfn));
        }
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from: The virtual kernel start address of the range
 * @to:   The virtual kernel end address of the range (exclusive)
 * @prot: The protection to be applied to this range
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
        phys_addr_t phys_addr;
        unsigned long virt_addr;
        unsigned long start = kern_hyp_va((unsigned long)from);
        unsigned long end = kern_hyp_va((unsigned long)to);

        if (is_kernel_in_hyp_mode())
                return 0;

        if (!kvm_host_owns_hyp_mappings())
                return -EPERM;

        start = start & PAGE_MASK;
        end = PAGE_ALIGN(end);

        for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
                int err;

                phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
                err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr,
                                            prot);
                if (err)
                        return err;
        }

        return 0;
}

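/*
 * Allocate a chunk of the hyp 'private' VA range (growing down from the
 * idmap) and map @phys_addr there. In protected mode the mapping is
 * delegated to the hypervisor via __pkvm_create_private_mapping instead.
 */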
static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
                                        unsigned long *haddr,
                                        enum kvm_pgtable_prot prot)
{
        unsigned long base;
        int ret = 0;

        if (!kvm_host_owns_hyp_mappings()) {
                base = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
                                         phys_addr, size, prot);
                if (IS_ERR_OR_NULL((void *)base))
                        return PTR_ERR((void *)base);
                *haddr = base;

                return 0;
        }

        mutex_lock(&kvm_hyp_pgd_mutex);

        /*
         * This assumes that we have enough space below the idmap
         * page to allocate our VAs. If not, the check below will
         * kick. A potential alternative would be to detect that
         * overflow and switch to an allocation above the idmap.
         *
         * The allocated size is always a multiple of PAGE_SIZE.
         */
        size = PAGE_ALIGN(size + offset_in_page(phys_addr));
        base = io_map_base - size;

        /*
         * Verify that BIT(VA_BITS - 1) hasn't been flipped by
         * allocating the new area, as it would indicate we've
         * overflowed the idmap/IO address range.
         */
        if ((base ^ io_map_base) & BIT(VA_BITS - 1))
                ret = -ENOMEM;
        else
                io_map_base = base;

        mutex_unlock(&kvm_hyp_pgd_mutex);

        if (ret)
                goto out;

        ret = __create_hyp_mappings(base, size, phys_addr, prot);
        if (ret)
                goto out;

        *haddr = base + offset_in_page(phys_addr);
out:
        return ret;
}

/**
 * create_hyp_io_mappings - Map IO into both kernel and HYP
 * @phys_addr: The physical start address which gets mapped
 * @size:      Size of the region being mapped
 * @kaddr:     Kernel VA for this mapping
 * @haddr:     HYP VA for this mapping
 */
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
                           void __iomem **kaddr,
                           void __iomem **haddr)
{
        unsigned long addr;
        int ret;

        if (is_protected_kvm_enabled())
                return -EPERM;

        *kaddr = ioremap(phys_addr, size);
        if (!*kaddr)
                return -ENOMEM;

        if (is_kernel_in_hyp_mode()) {
                *haddr = *kaddr;
                return 0;
        }

        ret = __create_hyp_private_mapping(phys_addr, size,
                                           &addr, PAGE_HYP_DEVICE);
        if (ret) {
                iounmap(*kaddr);
                *kaddr = NULL;
                *haddr = NULL;
                return ret;
        }

        *haddr = (void __iomem *)addr;
        return 0;
}

/**
 * create_hyp_exec_mappings - Map an executable range into HYP
 * @phys_addr: The physical start address which gets mapped
 * @size:      Size of the region being mapped
 * @haddr:     HYP VA for this mapping
 */
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
                             void **haddr)
{
        unsigned long addr;
        int ret;

        BUG_ON(is_kernel_in_hyp_mode());

        ret = __create_hyp_private_mapping(phys_addr, size,
                                           &addr, PAGE_HYP_EXEC);
        if (ret) {
                *haddr = NULL;
                return ret;
        }

        *haddr = (void *)addr;
        return 0;
}

static struct kvm_pgtable_mm_ops kvm_user_mm_ops = {
        /* We shouldn't need any other callback to walk the PT */
        .phys_to_virt           = kvm_host_va,
};

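/*
 * Walk the current task's stage-1 page tables (reusing the generic walker
 * on kvm->mm->pgd) and return the size of the block or page that maps
 * @addr. Used below to decide how big a stage-2 mapping can safely be.
 */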
static int get_user_mapping_size(struct kvm *kvm, u64 addr)
{
        struct kvm_pgtable pgt = {
                .pgd            = (kvm_pte_t *)kvm->mm->pgd,
                .ia_bits        = VA_BITS,
                .start_level    = (KVM_PGTABLE_MAX_LEVELS -
                                   CONFIG_PGTABLE_LEVELS),
                .mm_ops         = &kvm_user_mm_ops,
        };
        kvm_pte_t pte = 0;      /* Keep GCC quiet... */
        u32 level = ~0;
        int ret;

        ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
        VM_BUG_ON(ret);
        VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
        VM_BUG_ON(!(pte & PTE_VALID));

        return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
}

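/* Memory management callbacks used by the stage-2 page-table code. */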
static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
        .zalloc_page            = stage2_memcache_zalloc_page,
        .zalloc_pages_exact     = kvm_host_zalloc_pages_exact,
        .free_pages_exact       = free_pages_exact,
        .get_page               = kvm_host_get_page,
        .put_page               = kvm_host_put_page,
        .page_count             = kvm_host_page_count,
        .phys_to_virt           = kvm_host_va,
        .virt_to_phys           = kvm_host_pa,
        .dcache_clean_inval_poc = clean_dcache_guest_page,
        .icache_inval_pou       = invalidate_icache_guest_page,
};

/**
 * kvm_init_stage2_mmu - Initialise an S2 MMU structure
 * @kvm: The pointer to the KVM structure
 * @mmu: The pointer to the s2 MMU structure
 *
 * Allocates only the stage-2 HW PGD level table(s).
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
{
        int cpu, err;
        struct kvm_pgtable *pgt;

        if (mmu->pgt != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }

        pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);
        if (!pgt)
                return -ENOMEM;

        mmu->arch = &kvm->arch;
        err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
        if (err)
                goto out_free_pgtable;

        mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
        if (!mmu->last_vcpu_ran) {
                err = -ENOMEM;
                goto out_destroy_pgtable;
        }

        for_each_possible_cpu(cpu)
                *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;

        mmu->pgt = pgt;
        mmu->pgd_phys = __pa(pgt->pgd);
        WRITE_ONCE(mmu->vmid.vmid_gen, 0);
        return 0;

out_destroy_pgtable:
        kvm_pgtable_stage2_destroy(pgt);
out_free_pgtable:
        kfree(pgt);
        return err;
}

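/*
 * Unmap the stage-2 mappings of any regular RAM backing @memslot, leaving
 * VM_PFNMAP (device) regions in place.
 */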
static void stage2_unmap_memslot(struct kvm *kvm,
                                 struct kvm_memory_slot *memslot)
{
        hva_t hva = memslot->userspace_addr;
        phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
        phys_addr_t size = PAGE_SIZE * memslot->npages;
        hva_t reg_end = hva + size;

        /*
         * A memory region could potentially cover multiple VMAs, and any holes
         * between them, so iterate over all of them to find out if we should
         * unmap any of them.
         *
         *     +--------------------------------------------+
         * +---------------+----------------+   +----------------+
         * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
         * +---------------+----------------+   +----------------+
         *     |               memory region                |
         *     +--------------------------------------------+
         */
        do {
                struct vm_area_struct *vma;
                hva_t vm_start, vm_end;

                vma = find_vma_intersection(current->mm, hva, reg_end);
                if (!vma)
                        break;

                /*
                 * Take the intersection of this VMA with the memory region
                 */
                vm_start = max(hva, vma->vm_start);
                vm_end = min(reg_end, vma->vm_end);

                if (!(vma->vm_flags & VM_PFNMAP)) {
                        gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
                        unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
                }
                hva = vm_end;
        } while (hva < reg_end);
}

/**
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memregions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
void stage2_unmap_vm(struct kvm *kvm)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int idx, bkt;

        idx = srcu_read_lock(&kvm->srcu);
        mmap_read_lock(current->mm);
        spin_lock(&kvm->mmu_lock);

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, bkt, slots)
                stage2_unmap_memslot(kvm, memslot);

        spin_unlock(&kvm->mmu_lock);
        mmap_read_unlock(current->mm);
        srcu_read_unlock(&kvm->srcu, idx);
}

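/*
 * Tear down the stage-2 page tables of @mmu: clear the pointers under
 * mmu_lock so that faulting vCPUs cannot race with the teardown, then free
 * the page tables outside the lock.
 */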
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
{
        struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
        struct kvm_pgtable *pgt = NULL;

        spin_lock(&kvm->mmu_lock);
        pgt = mmu->pgt;
        if (pgt) {
                mmu->pgd_phys = 0;
                mmu->pgt = NULL;
                free_percpu(mmu->last_vcpu_ran);
        }
        spin_unlock(&kvm->mmu_lock);

        if (pgt) {
                kvm_pgtable_stage2_destroy(pgt);
                kfree(pgt);
        }
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:       The KVM pointer
 * @guest_ipa: The IPA at which to insert the mapping
 * @pa:        The physical address of the device
 * @size:      The size of the mapping
 * @writable:  Whether or not to create a writable mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size, bool writable)
{
        phys_addr_t addr;
        int ret = 0;
        struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
        struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
                                     KVM_PGTABLE_PROT_R |
                                     (writable ? KVM_PGTABLE_PROT_W : 0);

        if (is_protected_kvm_enabled())
                return -EPERM;

        size += offset_in_page(guest_ipa);
        guest_ipa &= PAGE_MASK;

        for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
                ret = kvm_mmu_topup_memory_cache(&cache,
                                                 kvm_mmu_cache_min_pages(kvm));
                if (ret)
                        break;

                spin_lock(&kvm->mmu_lock);
                ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
                                             &cache);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        break;

                pa += PAGE_SIZE;
        }

        kvm_mmu_free_memory_cache(&cache);
        return ret;
}

/**
 * stage2_wp_range() - write protect stage2 memory region range
 * @mmu:  The KVM stage-2 MMU pointer
 * @addr: Start address of range
 * @end:  End address of range
 */
static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
{
        struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
        stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
}

/**
 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
 * @kvm:  The KVM pointer
 * @slot: The memory slot to write protect
 *
 * Called to start logging dirty pages after the KVM_MEM_LOG_DIRTY_PAGES
 * operation is requested for a memory region. After this function returns,
 * all present PUD, PMD and PTE entries in the memory region are write
 * protected, and the dirty page log can then be read.
 *
 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
 * serializing operations for VM memory regions.
 */
static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
        struct kvm_memslots *slots = kvm_memslots(kvm);
        struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
        phys_addr_t start, end;

        if (WARN_ON_ONCE(!memslot))
                return;

        start = memslot->base_gfn << PAGE_SHIFT;
        end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

        spin_lock(&kvm->mmu_lock);
        stage2_wp_range(&kvm->arch.mmu, start, end);
        spin_unlock(&kvm->mmu_lock);
        kvm_flush_remote_tlbs(kvm);
}

/**
 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
 * @kvm:        The KVM pointer
 * @slot:       The memory slot associated with mask
 * @gfn_offset: The gfn offset in memory slot
 * @mask:       The mask of dirty pages at offset 'gfn_offset' in this memory
 *              slot to be write protected
 *
 * Walks the bits set in mask and write protects the associated ptes. Caller
 * must acquire kvm_mmu_lock.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
                                            struct kvm_memory_slot *slot,
                                            gfn_t gfn_offset, unsigned long mask)
{
        phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
        phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
        phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

        stage2_wp_range(&kvm->arch.mmu, start, end);
}

/*
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * dirty pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                                             struct kvm_memory_slot *slot,
                                             gfn_t gfn_offset, unsigned long mask)
{
        kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

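/* Deliver SIGBUS (BUS_MCEERR_AR) to userspace for a hwpoisoned guest page. */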
static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
{
        send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
}

static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
                                               unsigned long hva,
                                               unsigned long map_size)
{
        gpa_t gpa_start;
        hva_t uaddr_start, uaddr_end;
        size_t size;

        /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
        if (map_size == PAGE_SIZE)
                return true;

        size = memslot->npages * PAGE_SIZE;

        gpa_start = memslot->base_gfn << PAGE_SHIFT;

        uaddr_start = memslot->userspace_addr;
        uaddr_end = uaddr_start + size;

        /*
         * Pages belonging to memslots that don't have the same alignment
         * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
         * PMD/PUD entries, because we'll end up mapping the wrong pages.
         *
         * Consider a layout like the following:
         *
         *    memslot->userspace_addr:
         *    +-----+--------------------+--------------------+---+
         *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
         *    +-----+--------------------+--------------------+---+
         *
         *    memslot->base_gfn << PAGE_SHIFT:
         *      +---+--------------------+--------------------+-----+
         *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
         *      +---+--------------------+--------------------+-----+
         *
         * If we create those stage-2 blocks, we'll end up with this incorrect
         * mapping:
         *   d -> f
         *   e -> g
         *   f -> h
         */
        if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
                return false;

        /*
         * Next, let's make sure we're not trying to map anything not covered
         * by the memslot. This means we have to prohibit block size mappings
         * for the beginning and end of a non-block aligned and non-block sized
         * memory slot (illustrated by the head and tail parts of the
         * userspace view above containing pages 'abcde' and 'xyz',
         * respectively).
         *
         * Note that it doesn't matter if we do the check using the
         * userspace_addr or the base_gfn, as both are equally aligned (per
         * the check above) and equally sized.
         */
        return (hva & ~(map_size - 1)) >= uaddr_start &&
               (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}

/*
 * Check if the given hva is backed by a transparent huge page (THP) and
 * whether it can be mapped using block mapping in stage2. If so, adjust
 * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
 * supported. This will need to be updated to support other THP sizes.
 *
 * Returns the size of the mapping.
 */
static unsigned long
transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
                            unsigned long hva, kvm_pfn_t *pfnp,
                            phys_addr_t *ipap)
{
        kvm_pfn_t pfn = *pfnp;

        /*
         * Make sure the adjustment is done only for THP pages. Also make
         * sure that the HVA and IPA are sufficiently aligned and that the
         * block map is contained within the memslot.
         */
        if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
            get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
                /*
                 * The address we faulted on is backed by a transparent huge
                 * page.  However, because we map the compound huge page and
                 * not the individual tail page, we need to transfer the
                 * refcount to the head page.  We have to be careful that the
                 * THP doesn't start to split while we are adjusting the
                 * refcounts.
                 *
                 * We are sure this doesn't happen, because mmu_notifier_retry
                 * was successful and we are holding the mmu_lock, so if this
                 * THP is trying to split, it will be blocked in the mmu
                 * notifier before touching any of the pages, specifically
                 * before being able to call __split_huge_page_refcount().
                 *
                 * We can therefore safely transfer the refcount from PG_tail
                 * to PG_head and switch the pfn from a tail page to the head
                 * page accordingly.
                 */
                *ipap &= PMD_MASK;
                kvm_release_pfn_clean(pfn);
                pfn &= ~(PTRS_PER_PMD - 1);
                get_page(pfn_to_page(pfn));
                *pfnp = pfn;

                return PMD_SIZE;
        }

        /* Use page mapping if we cannot use block mapping. */
        return PAGE_SIZE;
}

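/*
 * Work out the largest page size that can be used to map @hva, based on
 * the VMA backing it: the hugetlbfs page size for hugetlb VMAs, and for
 * VM_PFNMAP regions the largest of PUD/PMD/PAGE whose alignment is
 * respected by both the VA and the PA of the mapping.
 */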
Keqian Zhu | 2aa53d6 | 2021-05-07 19:03:22 +0800 | [diff] [blame] | 998 | static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva) |
| 999 | { |
| 1000 | unsigned long pa; |
| 1001 | |
| 1002 | if (is_vm_hugetlb_page(vma) && !(vma->vm_flags & VM_PFNMAP)) |
| 1003 | return huge_page_shift(hstate_vma(vma)); |
| 1004 | |
| 1005 | if (!(vma->vm_flags & VM_PFNMAP)) |
| 1006 | return PAGE_SHIFT; |
| 1007 | |
| 1008 | VM_BUG_ON(is_vm_hugetlb_page(vma)); |
| 1009 | |
| 1010 | pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start); |
| 1011 | |
| 1012 | #ifndef __PAGETABLE_PMD_FOLDED |
| 1013 | if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) && |
| 1014 | ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start && |
| 1015 | ALIGN(hva, PUD_SIZE) <= vma->vm_end) |
| 1016 | return PUD_SHIFT; |
| 1017 | #endif |
| 1018 | |
| 1019 | if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) && |
| 1020 | ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start && |
| 1021 | ALIGN(hva, PMD_SIZE) <= vma->vm_end) |
| 1022 | return PMD_SHIFT; |
| 1023 | |
| 1024 | return PAGE_SHIFT; |
| 1025 | } |
| 1026 | |
Steven Price | ea7fc1b | 2021-06-21 12:17:12 +0100 | [diff] [blame] | 1027 | /* |
| 1028 | * The page will be mapped in stage 2 as Normal Cacheable, so the VM will be |
| 1029 | * able to see the page's tags and therefore they must be initialised first. If |
| 1030 | * PG_mte_tagged is set, tags have already been initialised. |
| 1031 | * |
| 1032 | * The race in the test/set of the PG_mte_tagged flag is handled by: |
| 1033 |  * - preventing VM_SHARED mappings in a memslot with MTE, which prevents two
| 1034 |  *   VMs from racing to sanitise the same page
| 1035 | * - mmap_lock protects between a VM faulting a page in and the VMM performing |
| 1036 | * an mprotect() to add VM_MTE |
| 1037 | */ |
| 1038 | static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn, |
| 1039 | unsigned long size) |
| 1040 | { |
| 1041 | unsigned long i, nr_pages = size >> PAGE_SHIFT; |
| 1042 | struct page *page; |
| 1043 | |
| 1044 | if (!kvm_has_mte(kvm)) |
| 1045 | return 0; |
| 1046 | |
| 1047 | /* |
| 1048 | * pfn_to_online_page() is used to reject ZONE_DEVICE pages |
| 1049 | * that may not support tags. |
| 1050 | */ |
| 1051 | page = pfn_to_online_page(pfn); |
| 1052 | |
| 1053 | if (!page) |
| 1054 | return -EFAULT; |
| 1055 | |
| 1056 | for (i = 0; i < nr_pages; i++, page++) { |
| 1057 | if (!test_bit(PG_mte_tagged, &page->flags)) { |
| 1058 | mte_clear_page_tags(page_address(page)); |
| 1059 | set_bit(PG_mte_tagged, &page->flags); |
| 1060 | } |
| 1061 | } |
| 1062 | |
| 1063 | return 0; |
| 1064 | } |
| 1065 | |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1066 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
Christoffer Dall | 9804788 | 2014-08-19 12:18:04 +0200 | [diff] [blame] | 1067 | struct kvm_memory_slot *memslot, unsigned long hva, |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1068 | unsigned long fault_status) |
| 1069 | { |
Will Deacon | ffd1b63 | 2020-09-30 11:24:42 +0100 | [diff] [blame] | 1070 | int ret = 0; |
Punit Agrawal | 6396b85 | 2018-12-11 17:10:35 +0000 | [diff] [blame] | 1071 | bool write_fault, writable, force_pte = false; |
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1072 | bool exec_fault; |
| 1073 | bool device = false; |
Steven Price | ea7fc1b | 2021-06-21 12:17:12 +0100 | [diff] [blame] | 1074 | bool shared; |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1075 | unsigned long mmu_seq; |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1076 | struct kvm *kvm = vcpu->kvm; |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1077 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1078 | struct vm_area_struct *vma; |
James Morse | 1559b75 | 2019-12-17 12:38:09 +0000 | [diff] [blame] | 1079 | short vma_shift; |
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1080 | gfn_t gfn; |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 1081 | kvm_pfn_t pfn; |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1082 | bool logging_active = memslot_is_logging(memslot); |
Yanan Wang | 7d89483 | 2020-12-02 04:10:34 +0800 | [diff] [blame] | 1083 | unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu); |
| 1084 | unsigned long vma_pagesize, fault_granule; |
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1085 | enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R; |
| 1086 | struct kvm_pgtable *pgt; |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1087 | |
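	/*
	 * fault_granule is the size of the stage-2 mapping level at which
	 * the fault was taken, derived from the fault level in the syndrome.
	 * It is used below to decide whether a permission fault can be
	 * handled by simply relaxing permissions on the existing entry.
	 */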
Yanan Wang | 7d89483 | 2020-12-02 04:10:34 +0800 | [diff] [blame] | 1088 | fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level); |
Ard Biesheuvel | a7d079c | 2014-09-09 11:27:09 +0100 | [diff] [blame] | 1089 | write_fault = kvm_is_write_fault(vcpu); |
Marc Zyngier | c4ad98e | 2020-09-15 11:42:17 +0100 | [diff] [blame] | 1090 | exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); |
Marc Zyngier | d0e22b4 | 2017-10-23 17:11:19 +0100 | [diff] [blame] | 1091 | VM_BUG_ON(write_fault && exec_fault); |
| 1092 | |
| 1093 | if (fault_status == FSC_PERM && !write_fault && !exec_fault) { |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1094 | kvm_err("Unexpected L2 read permission error\n"); |
| 1095 | return -EFAULT; |
| 1096 | } |
| 1097 | |
Keqian Zhu | 2aa53d6 | 2021-05-07 19:03:22 +0800 | [diff] [blame] | 1098 | /* |
| 1099 | * Let's check if we will get back a huge page backed by hugetlbfs, or |
| 1100 |  * get a block mapping for a device MMIO region.
| 1101 | */ |
Michel Lespinasse | 89154dd | 2020-06-08 21:33:29 -0700 | [diff] [blame] | 1102 | mmap_read_lock(current->mm); |
Liam Howlett | 09eef83 | 2021-06-28 19:38:59 -0700 | [diff] [blame] | 1103 | vma = vma_lookup(current->mm, hva); |
Ard Biesheuvel | 37b5440 | 2014-09-17 14:56:17 -0700 | [diff] [blame] | 1104 | if (unlikely(!vma)) { |
| 1105 | kvm_err("Failed to find VMA for hva 0x%lx\n", hva); |
Michel Lespinasse | 89154dd | 2020-06-08 21:33:29 -0700 | [diff] [blame] | 1106 | mmap_read_unlock(current->mm); |
Ard Biesheuvel | 37b5440 | 2014-09-17 14:56:17 -0700 | [diff] [blame] | 1107 | return -EFAULT; |
| 1108 | } |
| 1109 | |
Keqian Zhu | 2aa53d6 | 2021-05-07 19:03:22 +0800 | [diff] [blame] | 1110 | /* |
| 1111 | * logging_active is guaranteed to never be true for VM_PFNMAP |
| 1112 | * memslots. |
| 1113 | */ |
| 1114 | if (logging_active) { |
Suzuki K Poulose | a80868f | 2019-03-12 09:52:51 +0000 | [diff] [blame] | 1115 | force_pte = true; |
Alexandru Elisei | 523b399 | 2020-09-10 14:33:51 +0100 | [diff] [blame] | 1116 | vma_shift = PAGE_SHIFT; |
Keqian Zhu | 2aa53d6 | 2021-05-07 19:03:22 +0800 | [diff] [blame] | 1117 | } else { |
| 1118 | vma_shift = get_vma_page_shift(vma, hva); |
Suzuki K Poulose | a80868f | 2019-03-12 09:52:51 +0000 | [diff] [blame] | 1119 | } |
| 1120 | |
Marc Zyngier | 80d9ac9 | 2021-07-13 12:36:41 +0100 | [diff] [blame] | 1121 | shared = (vma->vm_flags & VM_SHARED); |
Steven Price | ea7fc1b | 2021-06-21 12:17:12 +0100 | [diff] [blame] | 1122 | |
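	/*
	 * Downgrade the mapping granule if the memslot cannot back it with a
	 * block of that size: each unsupported case falls through to the
	 * next smaller size, down to a single page.
	 */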
Gavin Shan | 2f40c46 | 2020-10-26 10:06:26 +1100 | [diff] [blame] | 1123 | switch (vma_shift) { |
Gavin Shan | faf0003 | 2020-11-03 11:30:09 +1100 | [diff] [blame] | 1124 | #ifndef __PAGETABLE_PMD_FOLDED |
Gavin Shan | 2f40c46 | 2020-10-26 10:06:26 +1100 | [diff] [blame] | 1125 | case PUD_SHIFT: |
| 1126 | if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE)) |
| 1127 | break; |
| 1128 | fallthrough; |
Gavin Shan | faf0003 | 2020-11-03 11:30:09 +1100 | [diff] [blame] | 1129 | #endif |
Gavin Shan | 2f40c46 | 2020-10-26 10:06:26 +1100 | [diff] [blame] | 1130 | case CONT_PMD_SHIFT: |
| 1131 | vma_shift = PMD_SHIFT; |
| 1132 | fallthrough; |
| 1133 | case PMD_SHIFT: |
| 1134 | if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) |
| 1135 | break; |
| 1136 | fallthrough; |
| 1137 | case CONT_PTE_SHIFT: |
Alexandru Elisei | 523b399 | 2020-09-10 14:33:51 +0100 | [diff] [blame] | 1138 | vma_shift = PAGE_SHIFT; |
Gavin Shan | 2f40c46 | 2020-10-26 10:06:26 +1100 | [diff] [blame] | 1139 | force_pte = true; |
| 1140 | fallthrough; |
| 1141 | case PAGE_SHIFT: |
| 1142 | break; |
| 1143 | default: |
| 1144 | WARN_ONCE(1, "Unknown vma_shift %d", vma_shift); |
Alexandru Elisei | 523b399 | 2020-09-10 14:33:51 +0100 | [diff] [blame] | 1145 | } |
| 1146 | |
| 1147 | vma_pagesize = 1UL << vma_shift; |
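	/*
	 * For a block mapping, align the IPA down to the block boundary so
	 * that a single stage-2 entry covers the whole block.
	 */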
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1148 | if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) |
Alexandru Elisei | 523b399 | 2020-09-10 14:33:51 +0100 | [diff] [blame] | 1149 | fault_ipa &= ~(vma_pagesize - 1); |
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1150 | |
| 1151 | gfn = fault_ipa >> PAGE_SHIFT; |
Michel Lespinasse | 89154dd | 2020-06-08 21:33:29 -0700 | [diff] [blame] | 1152 | mmap_read_unlock(current->mm); |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1153 | |
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1154 | /* |
| 1155 | * Permission faults just need to update the existing leaf entry, |
| 1156 | * and so normally don't require allocations from the memcache. The |
| 1157 | * only exception to this is when dirty logging is enabled at runtime |
| 1158 | * and a write fault needs to collapse a block entry into a table. |
| 1159 | */ |
| 1160 | if (fault_status != FSC_PERM || (logging_active && write_fault)) { |
| 1161 | ret = kvm_mmu_topup_memory_cache(memcache, |
| 1162 | kvm_mmu_cache_min_pages(kvm)); |
| 1163 | if (ret) |
| 1164 | return ret; |
| 1165 | } |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1166 | |
| 1167 | mmu_seq = vcpu->kvm->mmu_notifier_seq; |
| 1168 | /* |
| 1169 | * Ensure the read of mmu_notifier_seq happens before we call |
| 1170 | * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk |
| 1171 |  * the page we just got a reference to getting unmapped before we have a
| 1172 |  * chance to grab the mmu_lock, which ensures that if the page gets
Sean Christopherson | cd4c718 | 2021-04-01 17:56:51 -0700 | [diff] [blame] | 1173 | * unmapped afterwards, the call to kvm_unmap_gfn will take it away |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1174 | * from us again properly. This smp_rmb() interacts with the smp_wmb() |
| 1175 | * in kvm_mmu_notifier_invalidate_<page|range_end>. |
Gavin Shan | 10ba2d1 | 2021-03-16 12:11:26 +0800 | [diff] [blame] | 1176 | * |
| 1177 |  * Besides, __gfn_to_pfn_memslot() is used instead of gfn_to_pfn_prot() to
| 1178 |  * avoid the unnecessary overhead of locating the memory slot again: the
| 1179 |  * slot is always the same even if @gfn is adjusted for huge pages.
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1180 | */ |
| 1181 | smp_rmb(); |
| 1182 | |
Gavin Shan | 10ba2d1 | 2021-03-16 12:11:26 +0800 | [diff] [blame] | 1183 | pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, |
| 1184 | write_fault, &writable, NULL); |
James Morse | 196f878 | 2017-06-20 17:11:48 +0100 | [diff] [blame] | 1185 | if (pfn == KVM_PFN_ERR_HWPOISON) { |
James Morse | 1559b75 | 2019-12-17 12:38:09 +0000 | [diff] [blame] | 1186 | kvm_send_hwpoison_signal(hva, vma_shift); |
James Morse | 196f878 | 2017-06-20 17:11:48 +0100 | [diff] [blame] | 1187 | return 0; |
| 1188 | } |
Christoffer Dall | 9ac7159 | 2016-08-17 10:46:10 +0200 | [diff] [blame] | 1189 | if (is_error_noslot_pfn(pfn)) |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1190 | return -EFAULT; |
| 1191 | |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1192 | if (kvm_is_device_pfn(pfn)) { |
Keqian Zhu | 2aa53d6 | 2021-05-07 19:03:22 +0800 | [diff] [blame] | 1193 | /* |
| 1194 | * If the page was identified as device early by looking at |
| 1195 | * the VMA flags, vma_pagesize is already representing the |
| 1196 | * largest quantity we can map. If instead it was mapped |
| 1197 |  * via __gfn_to_pfn_memslot(), vma_pagesize is set to PAGE_SIZE
| 1198 | * and must not be upgraded. |
| 1199 | * |
| 1200 | * In both cases, we don't let transparent_hugepage_adjust() |
| 1201 | * change things at the last minute. |
| 1202 | */ |
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1203 | device = true; |
| 1204 | } else if (logging_active && !write_fault) { |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1205 | /* |
| 1206 | * Only actually map the page as writable if this was a write |
| 1207 | * fault. |
| 1208 | */ |
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1209 | writable = false; |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1210 | } |
Kim Phillips | b886576 | 2014-06-26 01:45:51 +0100 | [diff] [blame] | 1211 | |
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1212 | if (exec_fault && device) |
Marc Zyngier | 6d674e2 | 2019-12-11 16:56:48 +0000 | [diff] [blame] | 1213 | return -ENOEXEC; |
| 1214 | |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1215 | spin_lock(&kvm->mmu_lock); |
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1216 | pgt = vcpu->arch.hw_mmu->pgt; |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1217 | if (mmu_notifier_retry(kvm, mmu_seq)) |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1218 | goto out_unlock; |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1219 | |
Suzuki K Poulose | 0529c90 | 2020-05-07 20:35:46 +0800 | [diff] [blame] | 1220 | /* |
| 1221 |  * If we are not forced to use page mapping, check if the fault is
| 1222 |  * backed by a THP and thus use a block mapping if possible.
| 1223 | */ |
Marc Zyngier | f2cc327 | 2021-07-26 16:35:49 +0100 | [diff] [blame] | 1224 | if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) { |
| 1225 | if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE) |
| 1226 | vma_pagesize = fault_granule; |
| 1227 | else |
| 1228 | vma_pagesize = transparent_hugepage_adjust(kvm, memslot, |
| 1229 | hva, &pfn, |
| 1230 | &fault_ipa); |
| 1231 | } |
Marc Zyngier | 9f03db6 | 2021-06-22 15:09:34 +0100 | [diff] [blame] | 1232 | |
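	/*
	 * For a translation fault on a normal (non-device) page of an MTE
	 * enabled guest, the tags must be sanitised before the page is made
	 * accessible at stage 2. Permission faults are skipped here because
	 * the page was already sanitised when it was first mapped.
	 */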
| 1233 | if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) { |
| 1234 | /* Check the VMM hasn't introduced a new VM_SHARED VMA */ |
| 1235 | if (!shared) |
| 1236 | ret = sanitise_mte_tags(kvm, pfn, vma_pagesize); |
| 1237 | else |
| 1238 | ret = -EFAULT; |
| 1239 | if (ret) |
| 1240 | goto out_unlock; |
| 1241 | } |
| 1242 | |
Yanan Wang | 509552e | 2021-01-14 20:13:50 +0800 | [diff] [blame] | 1243 | if (writable) |
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1244 | prot |= KVM_PGTABLE_PROT_W; |
Punit Agrawal | 3f58bf6 | 2018-12-11 17:10:34 +0000 | [diff] [blame] | 1245 | |
Yanan Wang | 25aa286 | 2021-06-17 18:58:24 +0800 | [diff] [blame] | 1246 | if (exec_fault) |
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1247 | prot |= KVM_PGTABLE_PROT_X; |
Punit Agrawal | 3f58bf6 | 2018-12-11 17:10:34 +0000 | [diff] [blame] | 1248 | |
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1249 | if (device) |
| 1250 | prot |= KVM_PGTABLE_PROT_DEVICE; |
| 1251 | else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) |
| 1252 | prot |= KVM_PGTABLE_PROT_X; |
Punit Agrawal | 6396b85 | 2018-12-11 17:10:35 +0000 | [diff] [blame] | 1253 | |
Yanan Wang | 7d89483 | 2020-12-02 04:10:34 +0800 | [diff] [blame] | 1254 | /* |
| 1255 |  * For a FSC_PERM fault, we only need to relax permissions if
| 1256 |  * vma_pagesize equals fault_granule. Otherwise,
| 1257 |  * kvm_pgtable_stage2_map() should be called to change the block size.
| 1258 | */ |
| 1259 | if (fault_status == FSC_PERM && vma_pagesize == fault_granule) { |
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1260 | ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot); |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1261 | } else { |
Will Deacon | 6f745f1 | 2020-09-11 14:25:25 +0100 | [diff] [blame] | 1262 | ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize, |
| 1263 | __pfn_to_phys(pfn), prot, |
| 1264 | memcache); |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1265 | } |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1266 | |
Yanan Wang | 509552e | 2021-01-14 20:13:50 +0800 | [diff] [blame] | 1267 | /* Mark the page dirty only if the fault is handled successfully */ |
| 1268 | if (writable && !ret) { |
| 1269 | kvm_set_pfn_dirty(pfn); |
Gavin Shan | 10ba2d1 | 2021-03-16 12:11:26 +0800 | [diff] [blame] | 1270 | mark_page_dirty_in_slot(kvm, memslot, gfn); |
Yanan Wang | 509552e | 2021-01-14 20:13:50 +0800 | [diff] [blame] | 1271 | } |
| 1272 | |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1273 | out_unlock: |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1274 | spin_unlock(&kvm->mmu_lock); |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 1275 | kvm_set_pfn_accessed(pfn); |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1276 | kvm_release_pfn_clean(pfn); |
Yanan Wang | 509552e | 2021-01-14 20:13:50 +0800 | [diff] [blame] | 1277 | return ret != -EAGAIN ? ret : 0; |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1278 | } |
| 1279 | |
Will Deacon | ee8efad | 2020-09-11 14:25:19 +0100 | [diff] [blame] | 1280 | /* Resolve the access fault by making the page young again. */ |
Marc Zyngier | aeda913 | 2015-03-12 18:16:52 +0000 | [diff] [blame] | 1281 | static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) |
| 1282 | { |
Will Deacon | ee8efad | 2020-09-11 14:25:19 +0100 | [diff] [blame] | 1283 | pte_t pte; |
| 1284 | kvm_pte_t kpte; |
| 1285 | struct kvm_s2_mmu *mmu; |
Marc Zyngier | aeda913 | 2015-03-12 18:16:52 +0000 | [diff] [blame] | 1286 | |
| 1287 | trace_kvm_access_fault(fault_ipa); |
| 1288 | |
| 1289 | spin_lock(&vcpu->kvm->mmu_lock); |
Will Deacon | ee8efad | 2020-09-11 14:25:19 +0100 | [diff] [blame] | 1290 | mmu = vcpu->arch.hw_mmu; |
| 1291 | kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa); |
Marc Zyngier | aeda913 | 2015-03-12 18:16:52 +0000 | [diff] [blame] | 1292 | spin_unlock(&vcpu->kvm->mmu_lock); |
Will Deacon | ee8efad | 2020-09-11 14:25:19 +0100 | [diff] [blame] | 1293 | |
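	/* If the IPA was still mapped, mark the underlying host page as accessed. */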
| 1294 | pte = __pte(kpte); |
| 1295 | if (pte_valid(pte)) |
| 1296 | kvm_set_pfn_accessed(pte_pfn(pte)); |
Marc Zyngier | aeda913 | 2015-03-12 18:16:52 +0000 | [diff] [blame] | 1297 | } |
| 1298 | |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1299 | /** |
| 1300 | * kvm_handle_guest_abort - handles all 2nd stage aborts |
| 1301 | * @vcpu: the VCPU pointer |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1302 | * |
| 1303 | * Any abort that gets to the host is almost guaranteed to be caused by a |
| 1304 |  * missing second stage translation table entry, which can mean either that
| 1305 |  * the guest simply needs more memory and we must allocate an appropriate page,
| 1306 |  * or that the guest tried to access I/O memory, which is emulated by user
| 1307 |  * space. The distinction is based on the IPA causing the fault and whether this
| 1308 | * memory region has been registered as standard RAM by user space. |
| 1309 | */ |
Tianjia Zhang | 74cc7e0 | 2020-06-23 21:14:15 +0800 | [diff] [blame] | 1310 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1311 | { |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1312 | unsigned long fault_status; |
| 1313 | phys_addr_t fault_ipa; |
| 1314 | struct kvm_memory_slot *memslot; |
Christoffer Dall | 9804788 | 2014-08-19 12:18:04 +0200 | [diff] [blame] | 1315 | unsigned long hva; |
| 1316 | bool is_iabt, write_fault, writable; |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1317 | gfn_t gfn; |
| 1318 | int ret, idx; |
| 1319 | |
Tyler Baicar | 621f48e | 2017-06-21 12:17:14 -0600 | [diff] [blame] | 1320 | fault_status = kvm_vcpu_trap_get_fault_type(vcpu); |
| 1321 | |
| 1322 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); |
James Morse | bb42892 | 2017-07-18 13:37:41 +0100 | [diff] [blame] | 1323 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); |
Tyler Baicar | 621f48e | 2017-06-21 12:17:14 -0600 | [diff] [blame] | 1324 | |
James Morse | bb42892 | 2017-07-18 13:37:41 +0100 | [diff] [blame] | 1325 | /* Synchronous External Abort? */ |
Will Deacon | c9a636f | 2020-07-29 11:28:18 +0100 | [diff] [blame] | 1326 | if (kvm_vcpu_abt_issea(vcpu)) { |
James Morse | bb42892 | 2017-07-18 13:37:41 +0100 | [diff] [blame] | 1327 | /* |
| 1328 | * For RAS the host kernel may handle this abort. |
| 1329 | * There is no need to pass the error into the guest. |
| 1330 | */ |
Will Deacon | 84b951a | 2020-07-29 11:28:19 +0100 | [diff] [blame] | 1331 | if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu))) |
James Morse | bb42892 | 2017-07-18 13:37:41 +0100 | [diff] [blame] | 1332 | kvm_inject_vabt(vcpu); |
Will Deacon | 84b951a | 2020-07-29 11:28:19 +0100 | [diff] [blame] | 1333 | |
| 1334 | return 1; |
Marc Zyngier | 4055710 | 2016-09-06 14:02:15 +0100 | [diff] [blame] | 1335 | } |
| 1336 | |
Gavin Shan | 3a949f4 | 2020-06-30 11:57:05 +1000 | [diff] [blame] | 1337 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu), |
Marc Zyngier | 7393b59 | 2012-09-17 19:27:09 +0100 | [diff] [blame] | 1338 | kvm_vcpu_get_hfar(vcpu), fault_ipa); |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1339 | |
| 1340 | 	/* Check that the stage-2 fault is a translation, permission or access fault */
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 1341 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM && |
| 1342 | fault_status != FSC_ACCESS) { |
Christoffer Dall | 0496daa5 | 2014-09-26 12:29:34 +0200 | [diff] [blame] | 1343 | kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", |
| 1344 | kvm_vcpu_trap_get_class(vcpu), |
| 1345 | (unsigned long)kvm_vcpu_trap_get_fault(vcpu), |
Gavin Shan | 3a949f4 | 2020-06-30 11:57:05 +1000 | [diff] [blame] | 1346 | (unsigned long)kvm_vcpu_get_esr(vcpu)); |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1347 | return -EFAULT; |
| 1348 | } |
| 1349 | |
| 1350 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 1351 | |
| 1352 | gfn = fault_ipa >> PAGE_SHIFT; |
Christoffer Dall | 9804788 | 2014-08-19 12:18:04 +0200 | [diff] [blame] | 1353 | memslot = gfn_to_memslot(vcpu->kvm, gfn); |
| 1354 | hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); |
Ard Biesheuvel | a7d079c | 2014-09-09 11:27:09 +0100 | [diff] [blame] | 1355 | write_fault = kvm_is_write_fault(vcpu); |
Christoffer Dall | 9804788 | 2014-08-19 12:18:04 +0200 | [diff] [blame] | 1356 | if (kvm_is_error_hva(hva) || (write_fault && !writable)) { |
Will Deacon | 022c832 | 2020-07-29 11:28:21 +0100 | [diff] [blame] | 1357 | /* |
| 1358 | * The guest has put either its instructions or its page-tables |
| 1359 | * somewhere it shouldn't have. Userspace won't be able to do |
| 1360 | * anything about this (there's no syndrome for a start), so |
| 1361 | * re-inject the abort back into the guest. |
| 1362 | */ |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1363 | if (is_iabt) { |
Marc Zyngier | 6d674e2 | 2019-12-11 16:56:48 +0000 | [diff] [blame] | 1364 | ret = -ENOEXEC; |
| 1365 | goto out; |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1366 | } |
| 1367 | |
Marc Zyngier | c4ad98e | 2020-09-15 11:42:17 +0100 | [diff] [blame] | 1368 | if (kvm_vcpu_abt_iss1tw(vcpu)) { |
Will Deacon | 022c832 | 2020-07-29 11:28:21 +0100 | [diff] [blame] | 1369 | kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
| 1370 | ret = 1; |
| 1371 | goto out_unlock; |
| 1372 | } |
| 1373 | |
Marc Zyngier | cfe3950 | 2012-12-12 14:42:09 +0000 | [diff] [blame] | 1374 | /* |
Marc Zyngier | 57c841f | 2016-01-29 15:01:28 +0000 | [diff] [blame] | 1375 | * Check for a cache maintenance operation. Since we |
| 1376 |  * ended up here, we know it is outside of any memory
| 1377 | * slot. But we can't find out if that is for a device, |
| 1378 | * or if the guest is just being stupid. The only thing |
| 1379 | * we know for sure is that this range cannot be cached. |
| 1380 | * |
| 1381 | * So let's assume that the guest is just being |
| 1382 | * cautious, and skip the instruction. |
| 1383 | */ |
Will Deacon | 54dc0d2 | 2020-07-29 11:28:20 +0100 | [diff] [blame] | 1384 | if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) { |
Marc Zyngier | cdb5e02 | 2020-10-14 09:29:27 +0100 | [diff] [blame] | 1385 | kvm_incr_pc(vcpu); |
Marc Zyngier | 57c841f | 2016-01-29 15:01:28 +0000 | [diff] [blame] | 1386 | ret = 1; |
| 1387 | goto out_unlock; |
| 1388 | } |
| 1389 | |
| 1390 | /* |
Marc Zyngier | cfe3950 | 2012-12-12 14:42:09 +0000 | [diff] [blame] | 1391 | * The IPA is reported as [MAX:12], so we need to |
| 1392 | * complement it with the bottom 12 bits from the |
| 1393 | * faulting VA. This is always 12 bits, irrespective |
| 1394 | * of the page size. |
| 1395 | */ |
| 1396 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); |
Tianjia Zhang | 74cc7e0 | 2020-06-23 21:14:15 +0800 | [diff] [blame] | 1397 | ret = io_mem_abort(vcpu, fault_ipa); |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1398 | goto out_unlock; |
| 1399 | } |
| 1400 | |
Christoffer Dall | c3058d5 | 2014-10-10 12:14:29 +0200 | [diff] [blame] | 1401 | /* Userspace should not be able to register out-of-bounds IPAs */ |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 1402 | VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm)); |
Christoffer Dall | c3058d5 | 2014-10-10 12:14:29 +0200 | [diff] [blame] | 1403 | |
Marc Zyngier | aeda913 | 2015-03-12 18:16:52 +0000 | [diff] [blame] | 1404 | if (fault_status == FSC_ACCESS) { |
| 1405 | handle_access_fault(vcpu, fault_ipa); |
| 1406 | ret = 1; |
| 1407 | goto out_unlock; |
| 1408 | } |
| 1409 | |
Christoffer Dall | 9804788 | 2014-08-19 12:18:04 +0200 | [diff] [blame] | 1410 | ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1411 | if (ret == 0) |
| 1412 | ret = 1; |
Marc Zyngier | 6d674e2 | 2019-12-11 16:56:48 +0000 | [diff] [blame] | 1413 | out: |
| 1414 | if (ret == -ENOEXEC) { |
| 1415 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
| 1416 | ret = 1; |
| 1417 | } |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1418 | out_unlock: |
| 1419 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 1420 | return ret; |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1421 | } |
| 1422 | |
Sean Christopherson | cd4c718 | 2021-04-01 17:56:51 -0700 | [diff] [blame] | 1423 | bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1424 | { |
Will Deacon | 063deeb | 2020-09-11 14:25:26 +0100 | [diff] [blame] | 1425 | if (!kvm->arch.mmu.pgt) |
kernel test robot | fcb8283 | 2021-04-27 06:33:57 +0800 | [diff] [blame] | 1426 | return false; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1427 | |
Sean Christopherson | cd4c718 | 2021-04-01 17:56:51 -0700 | [diff] [blame] | 1428 | __unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT, |
| 1429 | (range->end - range->start) << PAGE_SHIFT, |
| 1430 | range->may_block); |
| 1431 | |
kernel test robot | fcb8283 | 2021-04-27 06:33:57 +0800 | [diff] [blame] | 1432 | return false; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1433 | } |
| 1434 | |
Sean Christopherson | cd4c718 | 2021-04-01 17:56:51 -0700 | [diff] [blame] | 1435 | bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1436 | { |
Sean Christopherson | cd4c718 | 2021-04-01 17:56:51 -0700 | [diff] [blame] | 1437 | kvm_pfn_t pfn = pte_pfn(range->pte); |
Steven Price | ea7fc1b | 2021-06-21 12:17:12 +0100 | [diff] [blame] | 1438 | int ret; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1439 | |
Will Deacon | e9edb17 | 2020-09-11 14:25:16 +0100 | [diff] [blame] | 1440 | if (!kvm->arch.mmu.pgt) |
kernel test robot | fcb8283 | 2021-04-27 06:33:57 +0800 | [diff] [blame] | 1441 | return false; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1442 | |
Sean Christopherson | cd4c718 | 2021-04-01 17:56:51 -0700 | [diff] [blame] | 1443 | WARN_ON(range->end - range->start != 1); |
| 1444 | |
Steven Price | ea7fc1b | 2021-06-21 12:17:12 +0100 | [diff] [blame] | 1445 | ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE); |
| 1446 | if (ret) |
| 1447 | return false; |
| 1448 | |
Marc Zyngier | 694556d | 2018-08-23 09:58:27 +0100 | [diff] [blame] | 1449 | /* |
Yanan Wang | 25aa286 | 2021-06-17 18:58:24 +0800 | [diff] [blame] | 1450 | * We've moved a page around, probably through CoW, so let's treat |
| 1451 |  * it just like a translation fault; the map handler will clean
| 1452 | * the cache to the PoC. |
| 1453 | * |
Sean Christopherson | cd4c718 | 2021-04-01 17:56:51 -0700 | [diff] [blame] | 1454 | * The MMU notifiers will have unmapped a huge PMD before calling |
| 1455 | * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and |
| 1456 | * therefore we never need to clear out a huge PMD through this |
| 1457 | * calling path and a memcache is not required. |
| 1458 | */ |
| 1459 | kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT, |
| 1460 | PAGE_SIZE, __pfn_to_phys(pfn), |
| 1461 | KVM_PGTABLE_PROT_R, NULL); |
| 1462 | |
kernel test robot | fcb8283 | 2021-04-27 06:33:57 +0800 | [diff] [blame] | 1463 | return false; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1464 | } |
| 1465 | |
Sean Christopherson | cd4c718 | 2021-04-01 17:56:51 -0700 | [diff] [blame] | 1466 | bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 1467 | { |
Sean Christopherson | cd4c718 | 2021-04-01 17:56:51 -0700 | [diff] [blame] | 1468 | u64 size = (range->end - range->start) << PAGE_SHIFT; |
Will Deacon | ee8efad | 2020-09-11 14:25:19 +0100 | [diff] [blame] | 1469 | kvm_pte_t kpte; |
Sean Christopherson | cd4c718 | 2021-04-01 17:56:51 -0700 | [diff] [blame] | 1470 | pte_t pte; |
| 1471 | |
| 1472 | if (!kvm->arch.mmu.pgt) |
kernel test robot | fcb8283 | 2021-04-27 06:33:57 +0800 | [diff] [blame] | 1473 | return false; |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 1474 | |
Punit Agrawal | 35a6396 | 2018-12-11 17:10:40 +0000 | [diff] [blame] | 1475 | WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); |
Sean Christopherson | cd4c718 | 2021-04-01 17:56:51 -0700 | [diff] [blame] | 1476 | |
| 1477 | kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, |
| 1478 | range->start << PAGE_SHIFT); |
Will Deacon | ee8efad | 2020-09-11 14:25:19 +0100 | [diff] [blame] | 1479 | pte = __pte(kpte); |
| 1480 | return pte_valid(pte) && pte_young(pte); |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 1481 | } |
| 1482 | |
Sean Christopherson | cd4c718 | 2021-04-01 17:56:51 -0700 | [diff] [blame] | 1483 | bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 1484 | { |
Will Deacon | 063deeb | 2020-09-11 14:25:26 +0100 | [diff] [blame] | 1485 | if (!kvm->arch.mmu.pgt) |
kernel test robot | fcb8283 | 2021-04-27 06:33:57 +0800 | [diff] [blame] | 1486 | return false; |
Sean Christopherson | 501b918 | 2021-03-25 19:19:48 -0700 | [diff] [blame] | 1487 | |
Sean Christopherson | cd4c718 | 2021-04-01 17:56:51 -0700 | [diff] [blame] | 1488 | return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, |
| 1489 | range->start << PAGE_SHIFT); |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 1490 | } |
| 1491 | |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1492 | phys_addr_t kvm_mmu_get_httbr(void) |
| 1493 | { |
Will Deacon | 0f9d09b | 2020-09-11 14:25:12 +0100 | [diff] [blame] | 1494 | return __pa(hyp_pgtable->pgd); |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1495 | } |
| 1496 | |
Marc Zyngier | 5a677ce | 2013-04-12 19:12:06 +0100 | [diff] [blame] | 1497 | phys_addr_t kvm_get_idmap_vector(void) |
| 1498 | { |
| 1499 | return hyp_idmap_vector; |
| 1500 | } |
| 1501 | |
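/*
 * Map the HYP identity-mapped init text executable at EL2, so that the CPU
 * can run the EL2 init code via its identity mapping while the MMU is being
 * enabled.
 */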
Will Deacon | 0f9d09b | 2020-09-11 14:25:12 +0100 | [diff] [blame] | 1502 | static int kvm_map_idmap_text(void) |
Marc Zyngier | 0535a3e | 2016-06-30 18:40:43 +0100 | [diff] [blame] | 1503 | { |
Will Deacon | 0f9d09b | 2020-09-11 14:25:12 +0100 | [diff] [blame] | 1504 | unsigned long size = hyp_idmap_end - hyp_idmap_start; |
| 1505 | int err = __create_hyp_mappings(hyp_idmap_start, size, hyp_idmap_start, |
| 1506 | PAGE_HYP_EXEC); |
Marc Zyngier | 0535a3e | 2016-06-30 18:40:43 +0100 | [diff] [blame] | 1507 | if (err) |
| 1508 | kvm_err("Failed to idmap %lx-%lx\n", |
| 1509 | hyp_idmap_start, hyp_idmap_end); |
| 1510 | |
| 1511 | return err; |
| 1512 | } |
| 1513 | |
Quentin Perret | 7aef0cb | 2021-03-19 10:01:14 +0000 | [diff] [blame] | 1514 | static void *kvm_hyp_zalloc_page(void *arg) |
| 1515 | { |
| 1516 | return (void *)get_zeroed_page(GFP_KERNEL); |
| 1517 | } |
| 1518 | |
| 1519 | static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = { |
| 1520 | .zalloc_page = kvm_hyp_zalloc_page, |
| 1521 | .get_page = kvm_host_get_page, |
| 1522 | .put_page = kvm_host_put_page, |
| 1523 | .phys_to_virt = kvm_host_va, |
| 1524 | .virt_to_phys = kvm_host_pa, |
| 1525 | }; |
| 1526 | |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 1527 | int kvm_mmu_init(u32 *hyp_va_bits) |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1528 | { |
Marc Zyngier | 2fb4105 | 2013-04-12 19:12:03 +0100 | [diff] [blame] | 1529 | int err; |
| 1530 | |
Andrew Scull | 0a78791 | 2020-05-19 11:40:36 +0100 | [diff] [blame] | 1531 | hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start); |
Marc Zyngier | 46fef15 | 2018-03-12 14:25:10 +0000 | [diff] [blame] | 1532 | hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE); |
Andrew Scull | 0a78791 | 2020-05-19 11:40:36 +0100 | [diff] [blame] | 1533 | hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end); |
Marc Zyngier | 46fef15 | 2018-03-12 14:25:10 +0000 | [diff] [blame] | 1534 | hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE); |
Andrew Scull | 0a78791 | 2020-05-19 11:40:36 +0100 | [diff] [blame] | 1535 | hyp_idmap_vector = __pa_symbol(__kvm_hyp_init); |
Marc Zyngier | 5a677ce | 2013-04-12 19:12:06 +0100 | [diff] [blame] | 1536 | |
Ard Biesheuvel | 06f75a1 | 2015-03-19 16:42:26 +0000 | [diff] [blame] | 1537 | /* |
| 1538 | * We rely on the linker script to ensure at build time that the HYP |
| 1539 | * init code does not cross a page boundary. |
| 1540 | */ |
| 1541 | BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); |
Marc Zyngier | 5a677ce | 2013-04-12 19:12:06 +0100 | [diff] [blame] | 1542 | |
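	/*
	 * The T0SZ field encodes a VA size as (64 - number of VA bits), so
	 * invert the idmap T0SZ here to get the usable EL2 VA width.
	 */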
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 1543 | *hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET); |
| 1544 | kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits); |
Marc Zyngier | b4ef049 | 2017-12-03 20:04:51 +0000 | [diff] [blame] | 1545 | kvm_debug("IDMAP page: %lx\n", hyp_idmap_start); |
| 1546 | kvm_debug("HYP VA range: %lx:%lx\n", |
| 1547 | kern_hyp_va(PAGE_OFFSET), |
| 1548 | kern_hyp_va((unsigned long)high_memory - 1)); |
Marc Zyngier | eac378a | 2016-06-30 18:40:50 +0100 | [diff] [blame] | 1549 | |
Marc Zyngier | 6c41a41 | 2016-06-30 18:40:51 +0100 | [diff] [blame] | 1550 | if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) && |
Marc Zyngier | ed57cac | 2017-12-03 18:22:49 +0000 | [diff] [blame] | 1551 | hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) && |
Marc Zyngier | d2896d4 | 2016-08-22 09:01:17 +0100 | [diff] [blame] | 1552 | hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) { |
Marc Zyngier | eac378a | 2016-06-30 18:40:50 +0100 | [diff] [blame] | 1553 | /* |
| 1554 |  * The idmap page intersects with the VA space;
| 1555 |  * it is not safe to continue further.
| 1556 | */ |
| 1557 | kvm_err("IDMAP intersecting with HYP VA, unable to continue\n"); |
| 1558 | err = -EINVAL; |
| 1559 | goto out; |
| 1560 | } |
| 1561 | |
Will Deacon | 0f9d09b | 2020-09-11 14:25:12 +0100 | [diff] [blame] | 1562 | hyp_pgtable = kzalloc(sizeof(*hyp_pgtable), GFP_KERNEL); |
| 1563 | if (!hyp_pgtable) { |
| 1564 | kvm_err("Hyp mode page-table not allocated\n"); |
Marc Zyngier | 2fb4105 | 2013-04-12 19:12:03 +0100 | [diff] [blame] | 1565 | err = -ENOMEM; |
| 1566 | goto out; |
| 1567 | } |
| 1568 | |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 1569 | err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops); |
Will Deacon | 0f9d09b | 2020-09-11 14:25:12 +0100 | [diff] [blame] | 1570 | if (err) |
| 1571 | goto out_free_pgtable; |
Marc Zyngier | 0535a3e | 2016-06-30 18:40:43 +0100 | [diff] [blame] | 1572 | |
Will Deacon | 0f9d09b | 2020-09-11 14:25:12 +0100 | [diff] [blame] | 1573 | err = kvm_map_idmap_text(); |
| 1574 | if (err) |
| 1575 | goto out_destroy_pgtable; |
Marc Zyngier | 5a677ce | 2013-04-12 19:12:06 +0100 | [diff] [blame] | 1576 | |
Marc Zyngier | e3f019b | 2017-12-04 17:04:38 +0000 | [diff] [blame] | 1577 | io_map_base = hyp_idmap_start; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1578 | return 0; |
Will Deacon | 0f9d09b | 2020-09-11 14:25:12 +0100 | [diff] [blame] | 1579 | |
| 1580 | out_destroy_pgtable: |
| 1581 | kvm_pgtable_hyp_destroy(hyp_pgtable); |
| 1582 | out_free_pgtable: |
| 1583 | kfree(hyp_pgtable); |
| 1584 | hyp_pgtable = NULL; |
Marc Zyngier | 2fb4105 | 2013-04-12 19:12:03 +0100 | [diff] [blame] | 1585 | out: |
Marc Zyngier | 2fb4105 | 2013-04-12 19:12:03 +0100 | [diff] [blame] | 1586 | return err; |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1587 | } |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 1588 | |
| 1589 | void kvm_arch_commit_memory_region(struct kvm *kvm, |
Sean Christopherson | 9d4c197 | 2020-02-18 13:07:24 -0800 | [diff] [blame] | 1590 | struct kvm_memory_slot *old, |
Paolo Bonzini | f36f3f2 | 2015-05-18 13:20:23 +0200 | [diff] [blame] | 1591 | const struct kvm_memory_slot *new, |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 1592 | enum kvm_mr_change change) |
| 1593 | { |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1594 | /* |
| 1595 |  * At this point the memslot has been committed and there is an
Fuad Tabba | 656012c | 2020-04-01 15:03:10 +0100 | [diff] [blame] | 1596 |  * allocated dirty_bitmap[]; dirty pages will be tracked while the
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1597 | * memory slot is write protected. |
| 1598 | */ |
Sean Christopherson | 509c594 | 2021-12-06 20:54:12 +0100 | [diff] [blame] | 1599 | if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES) { |
Keqian Zhu | c862626 | 2020-04-13 20:20:23 +0800 | [diff] [blame] | 1600 | /* |
| 1601 |  * If we're using initial-all-set, we don't need to write
| 1602 |  * protect any pages because they're all reported as dirty.
| 1603 |  * Huge pages and normal pages will be write protected gradually.
| 1604 | */ |
| 1605 | if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) { |
Sean Christopherson | 509c594 | 2021-12-06 20:54:12 +0100 | [diff] [blame] | 1606 | kvm_mmu_wp_memory_region(kvm, new->id); |
Keqian Zhu | c862626 | 2020-04-13 20:20:23 +0800 | [diff] [blame] | 1607 | } |
| 1608 | } |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 1609 | } |
| 1610 | |
| 1611 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
Sean Christopherson | 537a17b | 2021-12-06 20:54:11 +0100 | [diff] [blame] | 1612 | const struct kvm_memory_slot *old, |
| 1613 | struct kvm_memory_slot *new, |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 1614 | enum kvm_mr_change change) |
| 1615 | { |
Sean Christopherson | 509c594 | 2021-12-06 20:54:12 +0100 | [diff] [blame] | 1616 | hva_t hva, reg_end; |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 1617 | int ret = 0; |
| 1618 | |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1619 | if (change != KVM_MR_CREATE && change != KVM_MR_MOVE && |
| 1620 | change != KVM_MR_FLAGS_ONLY) |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 1621 | return 0; |
| 1622 | |
| 1623 | /* |
Christoffer Dall | c3058d5 | 2014-10-10 12:14:29 +0200 | [diff] [blame] | 1624 | * Prevent userspace from creating a memory region outside of the IPA |
| 1625 |  * space addressable by the KVM guest.
| 1626 | */ |
Sean Christopherson | 537a17b | 2021-12-06 20:54:11 +0100 | [diff] [blame] | 1627 | if ((new->base_gfn + new->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) |
Christoffer Dall | c3058d5 | 2014-10-10 12:14:29 +0200 | [diff] [blame] | 1628 | return -EFAULT; |
| 1629 | |
Sean Christopherson | 509c594 | 2021-12-06 20:54:12 +0100 | [diff] [blame] | 1630 | hva = new->userspace_addr; |
| 1631 | reg_end = hva + (new->npages << PAGE_SHIFT); |
| 1632 | |
Michel Lespinasse | 89154dd | 2020-06-08 21:33:29 -0700 | [diff] [blame] | 1633 | mmap_read_lock(current->mm); |
Christoffer Dall | c3058d5 | 2014-10-10 12:14:29 +0200 | [diff] [blame] | 1634 | /* |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 1635 | * A memory region could potentially cover multiple VMAs, and any holes |
Keqian Zhu | fd6f17b | 2021-05-07 19:03:21 +0800 | [diff] [blame] | 1636 | * between them, so iterate over all of them. |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 1637 | * |
| 1638 | * +--------------------------------------------+ |
| 1639 | * +---------------+----------------+ +----------------+ |
| 1640 | * | : VMA 1 | VMA 2 | | VMA 3 : | |
| 1641 | * +---------------+----------------+ +----------------+ |
| 1642 | * | memory region | |
| 1643 | * +--------------------------------------------+ |
| 1644 | */ |
| 1645 | do { |
Gavin Shan | c728fd4 | 2021-03-16 12:11:25 +0800 | [diff] [blame] | 1646 | struct vm_area_struct *vma; |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 1647 | |
Gavin Shan | c728fd4 | 2021-03-16 12:11:25 +0800 | [diff] [blame] | 1648 | vma = find_vma_intersection(current->mm, hva, reg_end); |
| 1649 | if (!vma) |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 1650 | break; |
| 1651 | |
| 1652 | /* |
Steven Price | ea7fc1b | 2021-06-21 12:17:12 +0100 | [diff] [blame] | 1653 | * VM_SHARED mappings are not allowed with MTE to avoid races |
| 1654 | * when updating the PG_mte_tagged page flag, see |
| 1655 | * sanitise_mte_tags for more details. |
| 1656 | */ |
Quentin Perret | 6e6a8ef08 | 2021-10-05 13:20:31 +0100 | [diff] [blame] | 1657 | if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) { |
| 1658 | ret = -EINVAL; |
| 1659 | break; |
| 1660 | } |
Steven Price | ea7fc1b | 2021-06-21 12:17:12 +0100 | [diff] [blame] | 1661 | |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 1662 | if (vma->vm_flags & VM_PFNMAP) { |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1663 | /* IO region dirty page logging not allowed */ |
Sean Christopherson | 537a17b | 2021-12-06 20:54:11 +0100 | [diff] [blame] | 1664 | if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { |
Marc Zyngier | 72f3104 | 2017-03-16 18:20:50 +0000 | [diff] [blame] | 1665 | ret = -EINVAL; |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 1666 | break; |
Keqian Zhu | fd6f17b | 2021-05-07 19:03:21 +0800 | [diff] [blame] | 1667 | } |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 1668 | } |
Keqian Zhu | fd6f17b | 2021-05-07 19:03:21 +0800 | [diff] [blame] | 1669 | hva = min(reg_end, vma->vm_end); |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 1670 | } while (hva < reg_end); |
| 1671 | |
Michel Lespinasse | 89154dd | 2020-06-08 21:33:29 -0700 | [diff] [blame] | 1672 | mmap_read_unlock(current->mm); |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 1673 | return ret; |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 1674 | } |
| 1675 | |
Sean Christopherson | e96c81e | 2020-02-18 13:07:27 -0800 | [diff] [blame] | 1676 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 1677 | { |
| 1678 | } |
| 1679 | |
Sean Christopherson | 1524825 | 2019-02-05 12:54:17 -0800 | [diff] [blame] | 1680 | void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 1681 | { |
| 1682 | } |
| 1683 | |
| 1684 | void kvm_arch_flush_shadow_all(struct kvm *kvm) |
| 1685 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1686 | kvm_free_stage2_pgd(&kvm->arch.mmu); |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 1687 | } |
| 1688 | |
| 1689 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, |
| 1690 | struct kvm_memory_slot *slot) |
| 1691 | { |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 1692 | gpa_t gpa = slot->base_gfn << PAGE_SHIFT; |
| 1693 | phys_addr_t size = slot->npages << PAGE_SHIFT; |
| 1694 | |
| 1695 | spin_lock(&kvm->mmu_lock); |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1696 | unmap_stage2_range(&kvm->arch.mmu, gpa, size); |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 1697 | spin_unlock(&kvm->mmu_lock); |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 1698 | } |
Marc Zyngier | 3c1e716 | 2014-12-19 16:05:31 +0000 | [diff] [blame] | 1699 | |
| 1700 | /* |
| 1701 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). |
| 1702 | * |
| 1703 | * Main problems: |
| 1704 | * - S/W ops are local to a CPU (not broadcast) |
| 1705 | * - We have line migration behind our back (speculation) |
| 1706 | * - System caches don't support S/W at all (damn!) |
| 1707 | * |
| 1708 | * In the face of the above, the best we can do is to try and convert |
| 1709 | * S/W ops to VA ops. Because the guest is not allowed to infer the |
| 1710 | * S/W to PA mapping, it can only use S/W to nuke the whole cache, |
| 1711 | * which is a rather good thing for us. |
| 1712 | * |
| 1713 | * Also, it is only used when turning caches on/off ("The expected |
| 1714 | * usage of the cache maintenance instructions that operate by set/way |
| 1715 | * is associated with the cache maintenance instructions associated |
| 1716 | * with the powerdown and powerup of caches, if this is required by |
| 1717 | * the implementation."). |
| 1718 | * |
| 1719 | * We use the following policy: |
| 1720 | * |
| 1721 | * - If we trap a S/W operation, we enable VM trapping to detect |
| 1722 | * caches being turned on/off, and do a full clean. |
| 1723 | * |
| 1724 | * - We flush the caches on both caches being turned on and off. |
| 1725 | * |
| 1726 | * - Once the caches are enabled, we stop trapping VM ops. |
| 1727 | */ |
| 1728 | void kvm_set_way_flush(struct kvm_vcpu *vcpu) |
| 1729 | { |
Christoffer Dall | 3df59d8 | 2017-08-03 12:09:05 +0200 | [diff] [blame] | 1730 | unsigned long hcr = *vcpu_hcr(vcpu); |
Marc Zyngier | 3c1e716 | 2014-12-19 16:05:31 +0000 | [diff] [blame] | 1731 | |
| 1732 | /* |
| 1733 | * If this is the first time we do a S/W operation |
| 1734 | * (i.e. HCR_TVM not set) flush the whole memory, and set the |
| 1735 | * VM trapping. |
| 1736 | * |
| 1737 | * Otherwise, rely on the VM trapping to wait for the MMU + |
| 1738 | * Caches to be turned off. At that point, we'll be able to |
| 1739 | * clean the caches again. |
| 1740 | */ |
| 1741 | if (!(hcr & HCR_TVM)) { |
| 1742 | trace_kvm_set_way_flush(*vcpu_pc(vcpu), |
| 1743 | vcpu_has_cache_enabled(vcpu)); |
| 1744 | stage2_flush_vm(vcpu->kvm); |
Christoffer Dall | 3df59d8 | 2017-08-03 12:09:05 +0200 | [diff] [blame] | 1745 | *vcpu_hcr(vcpu) = hcr | HCR_TVM; |
Marc Zyngier | 3c1e716 | 2014-12-19 16:05:31 +0000 | [diff] [blame] | 1746 | } |
| 1747 | } |
| 1748 | |
| 1749 | void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled) |
| 1750 | { |
| 1751 | bool now_enabled = vcpu_has_cache_enabled(vcpu); |
| 1752 | |
| 1753 | /* |
| 1754 | * If switching the MMU+caches on, need to invalidate the caches. |
| 1755 | * If switching it off, need to clean the caches. |
| 1756 | * Clean + invalidate does the trick always. |
| 1757 | */ |
| 1758 | if (now_enabled != was_enabled) |
| 1759 | stage2_flush_vm(vcpu->kvm); |
| 1760 | |
| 1761 | /* Caches are now on, stop trapping VM ops (until a S/W op) */ |
| 1762 | if (now_enabled) |
Christoffer Dall | 3df59d8 | 2017-08-03 12:09:05 +0200 | [diff] [blame] | 1763 | *vcpu_hcr(vcpu) &= ~HCR_TVM; |
Marc Zyngier | 3c1e716 | 2014-12-19 16:05:31 +0000 | [diff] [blame] | 1764 | |
| 1765 | trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled); |
| 1766 | } |