// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/sched/signal.h>
#include <asm/csr.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sbi.h>

#ifdef CONFIG_64BIT
static unsigned long stage2_mode = (HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT);
static unsigned long stage2_pgd_levels = 3;
#define stage2_index_bits       9
#else
static unsigned long stage2_mode = (HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT);
static unsigned long stage2_pgd_levels = 2;
#define stage2_index_bits       10
#endif

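/*
 * The stage2 root page table is four times the size of a regular page
 * table, so the root level provides two extra index bits. The PGD size
 * and the maximum guest physical address width are derived from that.
 */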
#define stage2_pgd_xbits        2
#define stage2_pgd_size         (1UL << (HGATP_PAGE_SHIFT + stage2_pgd_xbits))
#define stage2_gpa_bits         (HGATP_PAGE_SHIFT + \
                                 (stage2_pgd_levels * stage2_index_bits) + \
                                 stage2_pgd_xbits)
#define stage2_gpa_size         ((gpa_t)(1ULL << stage2_gpa_bits))

#define stage2_pte_leaf(__ptep) \
        (pte_val(*(__ptep)) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC))

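/*
 * Return the page table index of @addr at the given @level of the walk.
 * The root level (stage2_pgd_levels - 1) uses the two extra xbits, so
 * its index mask is four times wider than that of the inner levels.
 */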
static inline unsigned long stage2_pte_index(gpa_t addr, u32 level)
{
        unsigned long mask;
        unsigned long shift = HGATP_PAGE_SHIFT + (stage2_index_bits * level);

        if (level == (stage2_pgd_levels - 1))
                mask = (PTRS_PER_PTE * (1UL << stage2_pgd_xbits)) - 1;
        else
                mask = PTRS_PER_PTE - 1;

        return (addr >> shift) & mask;
}

static inline unsigned long stage2_pte_page_vaddr(pte_t pte)
{
        return (unsigned long)pfn_to_virt(pte_val(pte) >> _PAGE_PFN_SHIFT);
}

static int stage2_page_size_to_level(unsigned long page_size, u32 *out_level)
{
        u32 i;
        unsigned long psz = 1UL << 12;

        for (i = 0; i < stage2_pgd_levels; i++) {
                if (page_size == (psz << (i * stage2_index_bits))) {
                        *out_level = i;
                        return 0;
                }
        }

        return -EINVAL;
}

static int stage2_level_to_page_size(u32 level, unsigned long *out_pgsize)
{
        if (stage2_pgd_levels < level)
                return -EINVAL;

        *out_pgsize = 1UL << (12 + (level * stage2_index_bits));

        return 0;
}

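/*
 * Pre-fill the MMU page cache with up to @max zeroed pages so that
 * intermediate page tables can later be allocated without sleeping
 * while kvm->mmu_lock is held.
 */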
static int stage2_cache_topup(struct kvm_mmu_page_cache *pcache,
                              int min, int max)
{
        void *page;

        BUG_ON(max > KVM_MMU_PAGE_CACHE_NR_OBJS);
        if (pcache->nobjs >= min)
                return 0;
        while (pcache->nobjs < max) {
                page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
                if (!page)
                        return -ENOMEM;
                pcache->objects[pcache->nobjs++] = page;
        }

        return 0;
}

static void stage2_cache_flush(struct kvm_mmu_page_cache *pcache)
{
        while (pcache && pcache->nobjs)
                free_page((unsigned long)pcache->objects[--pcache->nobjs]);
}

static void *stage2_cache_alloc(struct kvm_mmu_page_cache *pcache)
{
        void *p;

        if (!pcache)
                return NULL;

        BUG_ON(!pcache->nobjs);
        p = pcache->objects[--pcache->nobjs];

        return p;
}

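/*
 * Walk the stage2 page table for @addr and, if a leaf mapping exists,
 * return it via @ptepp together with its level in @ptep_level.
 */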
static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr,
                                  pte_t **ptepp, u32 *ptep_level)
{
        pte_t *ptep;
        u32 current_level = stage2_pgd_levels - 1;

        *ptep_level = current_level;
        ptep = (pte_t *)kvm->arch.pgd;
        ptep = &ptep[stage2_pte_index(addr, current_level)];
        while (ptep && pte_val(*ptep)) {
                if (stage2_pte_leaf(ptep)) {
                        *ptep_level = current_level;
                        *ptepp = ptep;
                        return true;
                }

                if (current_level) {
                        current_level--;
                        *ptep_level = current_level;
                        ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
                        ptep = &ptep[stage2_pte_index(addr, current_level)];
                } else {
                        ptep = NULL;
                }
        }

        return false;
}

static void stage2_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
{
        struct cpumask hmask;
        unsigned long size = PAGE_SIZE;
        struct kvm_vmid *vmid = &kvm->arch.vmid;

        if (stage2_level_to_page_size(level, &size))
                return;
        addr &= ~(size - 1);

        /*
         * TODO: Instead of cpu_online_mask, we should only target CPUs
         * where the Guest/VM is running.
         */
        preempt_disable();
        riscv_cpuid_to_hartid_mask(cpu_online_mask, &hmask);
        sbi_remote_hfence_gvma_vmid(cpumask_bits(&hmask), addr, size,
                                    READ_ONCE(vmid->vmid));
        preempt_enable();
}

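/*
 * Install @new_pte at @level for guest physical address @addr, allocating
 * any missing intermediate page tables from @pcache. Fails with -EEXIST
 * if the walk hits an existing leaf above the target level.
 */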
static int stage2_set_pte(struct kvm *kvm, u32 level,
                          struct kvm_mmu_page_cache *pcache,
                          gpa_t addr, const pte_t *new_pte)
{
        u32 current_level = stage2_pgd_levels - 1;
        pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
        pte_t *ptep = &next_ptep[stage2_pte_index(addr, current_level)];

        if (current_level < level)
                return -EINVAL;

        while (current_level != level) {
                if (stage2_pte_leaf(ptep))
                        return -EEXIST;

                if (!pte_val(*ptep)) {
                        next_ptep = stage2_cache_alloc(pcache);
                        if (!next_ptep)
                                return -ENOMEM;
                        *ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
                                        __pgprot(_PAGE_TABLE));
                } else {
                        if (stage2_pte_leaf(ptep))
                                return -EEXIST;
                        next_ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
                }

                current_level--;
                ptep = &next_ptep[stage2_pte_index(addr, current_level)];
        }

        *ptep = *new_pte;
        if (stage2_pte_leaf(ptep))
                stage2_remote_tlb_flush(kvm, current_level, addr);

        return 0;
}

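/*
 * Create a stage2 mapping of size @page_size from guest physical address
 * @gpa to host physical address @hpa, with permissions derived from
 * @page_rdonly and @page_exec.
 */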
static int stage2_map_page(struct kvm *kvm,
                           struct kvm_mmu_page_cache *pcache,
                           gpa_t gpa, phys_addr_t hpa,
                           unsigned long page_size,
                           bool page_rdonly, bool page_exec)
{
        int ret;
        u32 level = 0;
        pte_t new_pte;
        pgprot_t prot;

        ret = stage2_page_size_to_level(page_size, &level);
        if (ret)
                return ret;

        /*
         * A RISC-V implementation can choose to either:
         * 1) Update 'A' and 'D' PTE bits in hardware
         * 2) Generate page fault when 'A' and/or 'D' bits are not set
         *    in the PTE so that software can update these bits.
         *
         * We support both options mentioned above. To achieve this, we
         * always set 'A' and 'D' PTE bits at time of creating stage2
         * mapping. To support KVM dirty page logging with both options
         * mentioned above, we will write-protect stage2 PTEs to track
         * dirty pages.
         */

        if (page_exec) {
                if (page_rdonly)
                        prot = PAGE_READ_EXEC;
                else
                        prot = PAGE_WRITE_EXEC;
        } else {
                if (page_rdonly)
                        prot = PAGE_READ;
                else
                        prot = PAGE_WRITE;
        }
        new_pte = pfn_pte(PFN_DOWN(hpa), prot);
        new_pte = pte_mkdirty(new_pte);

        return stage2_set_pte(kvm, level, pcache, gpa, &new_pte);
}

enum stage2_op {
        STAGE2_OP_NOP = 0,      /* Nothing */
        STAGE2_OP_CLEAR,        /* Clear/Unmap */
        STAGE2_OP_WP,           /* Write-protect */
};

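/*
 * Apply @op to the stage2 entry @ptep that maps @addr at @ptep_level.
 * Non-leaf entries are processed recursively; leaves are cleared or
 * write-protected and the affected range is flushed from remote TLBs.
 */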
static void stage2_op_pte(struct kvm *kvm, gpa_t addr,
                          pte_t *ptep, u32 ptep_level, enum stage2_op op)
{
        int i, ret;
        pte_t *next_ptep;
        u32 next_ptep_level;
        unsigned long next_page_size, page_size;

        ret = stage2_level_to_page_size(ptep_level, &page_size);
        if (ret)
                return;

        BUG_ON(addr & (page_size - 1));

        if (!pte_val(*ptep))
                return;

        if (ptep_level && !stage2_pte_leaf(ptep)) {
                next_ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
                next_ptep_level = ptep_level - 1;
                ret = stage2_level_to_page_size(next_ptep_level,
                                                &next_page_size);
                if (ret)
                        return;

                if (op == STAGE2_OP_CLEAR)
                        set_pte(ptep, __pte(0));
                for (i = 0; i < PTRS_PER_PTE; i++)
                        stage2_op_pte(kvm, addr + i * next_page_size,
                                      &next_ptep[i], next_ptep_level, op);
                if (op == STAGE2_OP_CLEAR)
                        put_page(virt_to_page(next_ptep));
        } else {
                if (op == STAGE2_OP_CLEAR)
                        set_pte(ptep, __pte(0));
                else if (op == STAGE2_OP_WP)
                        set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
                stage2_remote_tlb_flush(kvm, ptep_level, addr);
        }
}

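/*
 * Unmap the guest physical range [start, start + size). With @may_block,
 * kvm->mmu_lock is dropped periodically via cond_resched_lock() so that
 * large ranges do not starve other lock waiters.
 */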
static void stage2_unmap_range(struct kvm *kvm, gpa_t start,
                               gpa_t size, bool may_block)
{
        int ret;
        pte_t *ptep;
        u32 ptep_level;
        bool found_leaf;
        unsigned long page_size;
        gpa_t addr = start, end = start + size;

        while (addr < end) {
                found_leaf = stage2_get_leaf_entry(kvm, addr,
                                                   &ptep, &ptep_level);
                ret = stage2_level_to_page_size(ptep_level, &page_size);
                if (ret)
                        break;

                if (!found_leaf)
                        goto next;

                if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
                        stage2_op_pte(kvm, addr, ptep,
                                      ptep_level, STAGE2_OP_CLEAR);

next:
                addr += page_size;

                /*
                 * If the range is too large, release the kvm->mmu_lock
                 * to prevent starvation and lockup detector warnings.
                 */
                if (may_block && addr < end)
                        cond_resched_lock(&kvm->mmu_lock);
        }
}

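/*
 * Write-protect every leaf mapping in the guest physical range
 * [start, end), e.g. to prepare a memslot for dirty page logging.
 */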
static void stage2_wp_range(struct kvm *kvm, gpa_t start, gpa_t end)
{
        int ret;
        pte_t *ptep;
        u32 ptep_level;
        bool found_leaf;
        gpa_t addr = start;
        unsigned long page_size;

        while (addr < end) {
                found_leaf = stage2_get_leaf_entry(kvm, addr,
                                                   &ptep, &ptep_level);
                ret = stage2_level_to_page_size(ptep_level, &page_size);
                if (ret)
                        break;

                if (!found_leaf)
                        goto next;

                if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
                        stage2_op_pte(kvm, addr, ptep,
                                      ptep_level, STAGE2_OP_WP);

next:
                addr += page_size;
        }
}

static void stage2_wp_memory_region(struct kvm *kvm, int slot)
{
        struct kvm_memslots *slots = kvm_memslots(kvm);
        struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
        phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
        phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

        spin_lock(&kvm->mmu_lock);
        stage2_wp_range(kvm, start, end);
        spin_unlock(&kvm->mmu_lock);
        kvm_flush_remote_tlbs(kvm);
}

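/*
 * Map a host physical (typically MMIO) region at @hpa into the guest
 * physical address space at @gpa using 4K pages, topping up a local
 * page cache before taking kvm->mmu_lock for each PTE update.
 */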
static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
                          unsigned long size, bool writable)
{
        pte_t pte;
        int ret = 0;
        unsigned long pfn;
        phys_addr_t addr, end;
        struct kvm_mmu_page_cache pcache = { 0, };

        end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(hpa);

        for (addr = gpa; addr < end; addr += PAGE_SIZE) {
                pte = pfn_pte(pfn, PAGE_KERNEL);

                if (!writable)
                        pte = pte_wrprotect(pte);

                ret = stage2_cache_topup(&pcache,
                                         stage2_pgd_levels,
                                         KVM_MMU_PAGE_CACHE_NR_OBJS);
                if (ret)
                        goto out;

                spin_lock(&kvm->mmu_lock);
                ret = stage2_set_pte(kvm, 0, &pcache, addr, &pte);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        goto out;

                pfn++;
        }

out:
        stage2_cache_flush(&pcache);
        return ret;
}

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                                             struct kvm_memory_slot *slot,
                                             gfn_t gfn_offset,
                                             unsigned long mask)
{
        phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
        phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
        phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

        stage2_wp_range(kvm, start, end);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
                                        const struct kvm_memory_slot *memslot)
{
        kvm_flush_remote_tlbs(kvm);
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free)
{
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
        kvm_riscv_stage2_free_pgd(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
        phys_addr_t size = slot->npages << PAGE_SHIFT;

        spin_lock(&kvm->mmu_lock);
        stage2_unmap_range(kvm, gpa, size, false);
        spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new,
                                enum kvm_mr_change change)
{
        /*
         * At this point the memslot has been committed and there is an
         * allocated dirty_bitmap[]; dirty pages will be tracked while
         * the memory slot is write protected.
         */
        if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
                stage2_wp_memory_region(kvm, mem->slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                struct kvm_memory_slot *new,
                                enum kvm_mr_change change)
{
        hva_t hva = mem->userspace_addr;
        hva_t reg_end = hva + mem->memory_size;
        bool writable = !(mem->flags & KVM_MEM_READONLY);
        int ret = 0;

        if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
                        change != KVM_MR_FLAGS_ONLY)
                return 0;

        /*
         * Prevent userspace from creating a memory region outside of the
         * GPA space addressable by the KVM guest.
         */
        if ((new->base_gfn + new->npages) >=
            (stage2_gpa_size >> PAGE_SHIFT))
                return -EFAULT;

        mmap_read_lock(current->mm);

        /*
         * A memory region could potentially cover multiple VMAs, and
         * any holes between them, so iterate over all of them to find
         * out if we can map any of them right now.
         *
         *     +--------------------------------------------+
         * +---------------+----------------+   +----------------+
         * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
         * +---------------+----------------+   +----------------+
         *     |               memory region                |
         *     +--------------------------------------------+
         */
        do {
                struct vm_area_struct *vma = find_vma(current->mm, hva);
                hva_t vm_start, vm_end;

                if (!vma || vma->vm_start >= reg_end)
                        break;

                /*
                 * Mapping a read-only VMA is only allowed if the
                 * memory region is configured as read-only.
                 */
                if (writable && !(vma->vm_flags & VM_WRITE)) {
                        ret = -EPERM;
                        break;
                }

                /* Take the intersection of this VMA with the memory region */
                vm_start = max(hva, vma->vm_start);
                vm_end = min(reg_end, vma->vm_end);

                if (vma->vm_flags & VM_PFNMAP) {
                        gpa_t gpa = mem->guest_phys_addr +
                                    (vm_start - mem->userspace_addr);
                        phys_addr_t pa;

                        pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
                        pa += vm_start - vma->vm_start;

                        /* IO region dirty page logging not allowed */
                        if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                                ret = -EINVAL;
                                goto out;
                        }

                        ret = stage2_ioremap(kvm, gpa, pa,
                                             vm_end - vm_start, writable);
                        if (ret)
                                break;
                }
                hva = vm_end;
        } while (hva < reg_end);

        if (change == KVM_MR_FLAGS_ONLY)
                goto out;

        spin_lock(&kvm->mmu_lock);
        if (ret)
                stage2_unmap_range(kvm, mem->guest_phys_addr,
                                   mem->memory_size, false);
        spin_unlock(&kvm->mmu_lock);

out:
        mmap_read_unlock(current->mm);
        return ret;
}

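/*
 * The gfn-range handlers below are invoked by the generic KVM MMU
 * notifier code when host mappings backing guest memory change or
 * their accessed state is queried.
 */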
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
        if (!kvm->arch.pgd)
                return false;

        stage2_unmap_range(kvm, range->start << PAGE_SHIFT,
                           (range->end - range->start) << PAGE_SHIFT,
                           range->may_block);
        return false;
}

bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        int ret;
        kvm_pfn_t pfn = pte_pfn(range->pte);

        if (!kvm->arch.pgd)
                return false;

        WARN_ON(range->end - range->start != 1);

        ret = stage2_map_page(kvm, NULL, range->start << PAGE_SHIFT,
                              __pfn_to_phys(pfn), PAGE_SIZE, true, true);
        if (ret) {
                kvm_debug("Failed to map stage2 page (error %d)\n", ret);
                return true;
        }

        return false;
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        pte_t *ptep;
        u32 ptep_level = 0;
        u64 size = (range->end - range->start) << PAGE_SHIFT;

        if (!kvm->arch.pgd)
                return false;

        WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

        if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
                                   &ptep, &ptep_level))
                return false;

        return ptep_test_and_clear_young(NULL, 0, ptep);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        pte_t *ptep;
        u32 ptep_level = 0;
        u64 size = (range->end - range->start) << PAGE_SHIFT;

        if (!kvm->arch.pgd)
                return false;

        WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

        if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
                                   &ptep, &ptep_level))
                return false;

        return pte_young(*ptep);
}

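/*
 * Handle a stage2 guest page fault: translate the faulting HVA to a host
 * page, pick the largest mapping size supported by the backing VMA, and
 * install the stage2 mapping while respecting dirty logging and the MMU
 * notifier sequence count.
 */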
int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
                         struct kvm_memory_slot *memslot,
                         gpa_t gpa, unsigned long hva, bool is_write)
{
        int ret;
        kvm_pfn_t hfn;
        bool writeable;
        short vma_pageshift;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct vm_area_struct *vma;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_page_cache *pcache = &vcpu->arch.mmu_page_cache;
        bool logging = (memslot->dirty_bitmap &&
                        !(memslot->flags & KVM_MEM_READONLY)) ? true : false;
        unsigned long vma_pagesize, mmu_seq;

        mmap_read_lock(current->mm);

        vma = find_vma_intersection(current->mm, hva, hva + 1);
        if (unlikely(!vma)) {
                kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
                mmap_read_unlock(current->mm);
                return -EFAULT;
        }

        if (is_vm_hugetlb_page(vma))
                vma_pageshift = huge_page_shift(hstate_vma(vma));
        else
                vma_pageshift = PAGE_SHIFT;
        vma_pagesize = 1ULL << vma_pageshift;
        if (logging || (vma->vm_flags & VM_PFNMAP))
                vma_pagesize = PAGE_SIZE;

        if (vma_pagesize == PMD_SIZE || vma_pagesize == PGDIR_SIZE)
                gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;

        mmap_read_unlock(current->mm);

        if (vma_pagesize != PGDIR_SIZE &&
            vma_pagesize != PMD_SIZE &&
            vma_pagesize != PAGE_SIZE) {
                kvm_err("Invalid VMA page size 0x%lx\n", vma_pagesize);
                return -EFAULT;
        }

        /* We need minimum second+third level pages */
        ret = stage2_cache_topup(pcache, stage2_pgd_levels,
                                 KVM_MMU_PAGE_CACHE_NR_OBJS);
        if (ret) {
                kvm_err("Failed to topup stage2 cache\n");
                return ret;
        }

        mmu_seq = kvm->mmu_notifier_seq;

        hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable);
        if (hfn == KVM_PFN_ERR_HWPOISON) {
                send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
                                vma_pageshift, current);
                return 0;
        }
        if (is_error_noslot_pfn(hfn))
                return -EFAULT;

        /*
         * If logging is active then we allow writable pages only
         * for write faults.
         */
        if (logging && !is_write)
                writeable = false;

        spin_lock(&kvm->mmu_lock);

        if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;

        if (writeable) {
                kvm_set_pfn_dirty(hfn);
                mark_page_dirty(kvm, gfn);
                ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
                                      vma_pagesize, false, true);
        } else {
                ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
                                      vma_pagesize, true, true);
        }

        if (ret)
                kvm_err("Failed to map in stage2\n");

out_unlock:
        spin_unlock(&kvm->mmu_lock);
        kvm_set_pfn_accessed(hfn);
        kvm_release_pfn_clean(hfn);
        return ret;
}

void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu)
{
        stage2_cache_flush(&vcpu->arch.mmu_page_cache);
}

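/* Allocate and record the stage2 root page table (stage2_pgd_size bytes). */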
int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm)
{
        struct page *pgd_page;

        if (kvm->arch.pgd != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }

        pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
                                get_order(stage2_pgd_size));
        if (!pgd_page)
                return -ENOMEM;
        kvm->arch.pgd = page_to_virt(pgd_page);
        kvm->arch.pgd_phys = page_to_phys(pgd_page);

        return 0;
}

void kvm_riscv_stage2_free_pgd(struct kvm *kvm)
{
        void *pgd = NULL;

        spin_lock(&kvm->mmu_lock);
        if (kvm->arch.pgd) {
                stage2_unmap_range(kvm, 0UL, stage2_gpa_size, false);
                pgd = READ_ONCE(kvm->arch.pgd);
                kvm->arch.pgd = NULL;
                kvm->arch.pgd_phys = 0;
        }
        spin_unlock(&kvm->mmu_lock);

        if (pgd)
                free_pages((unsigned long)pgd, get_order(stage2_pgd_size));
}

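/*
 * Program the hgatp CSR with the current stage2 mode, VMID, and root page
 * table so the hart translates guest physical addresses for this VM.
 */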
void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu)
{
        unsigned long hgatp = stage2_mode;
        struct kvm_arch *k = &vcpu->kvm->arch;

        hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) &
                 HGATP_VMID_MASK;
        hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN;

        csr_write(CSR_HGATP, hgatp);

        if (!kvm_riscv_stage2_vmid_bits())
                __kvm_riscv_hfence_gvma_all();
}

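/*
 * Probe whether the hart supports Sv48x4 stage2 translation by writing it
 * to hgatp and reading the mode back; otherwise keep the default mode
 * (Sv39x4 on RV64, Sv32x4 on RV32).
 */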
void kvm_riscv_stage2_mode_detect(void)
{
#ifdef CONFIG_64BIT
        /* Try Sv48x4 stage2 mode */
        csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
        if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) {
                stage2_mode = (HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
                stage2_pgd_levels = 4;
        }
        csr_write(CSR_HGATP, 0);

        __kvm_riscv_hfence_gvma_all();
#endif
}

unsigned long kvm_riscv_stage2_mode(void)
{
        return stage2_mode >> HGATP_MODE_SHIFT;
}