// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/sched/signal.h>
#include <asm/csr.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sbi.h>

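/*
 * Stage2 (guest physical) translation defaults: Sv39x4 with a 3-level
 * page table on 64-bit hosts, Sv32x4 with a 2-level page table on 32-bit
 * hosts. On 64-bit, kvm_riscv_stage2_mode_detect() may upgrade this to
 * Sv48x4 when the hardware supports it.
 */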
#ifdef CONFIG_64BIT
static unsigned long stage2_mode = (HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT);
static unsigned long stage2_pgd_levels = 3;
#define stage2_index_bits	9
#else
static unsigned long stage2_mode = (HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT);
static unsigned long stage2_pgd_levels = 2;
#define stage2_index_bits	10
#endif

#define stage2_pgd_xbits	2
#define stage2_pgd_size		(1UL << (HGATP_PAGE_SHIFT + stage2_pgd_xbits))
#define stage2_gpa_bits		(HGATP_PAGE_SHIFT + \
				 (stage2_pgd_levels * stage2_index_bits) + \
				 stage2_pgd_xbits)
#define stage2_gpa_size		((gpa_t)(1ULL << stage2_gpa_bits))

#define stage2_pte_leaf(__ptep)	\
	(pte_val(*(__ptep)) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC))

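/*
 * Index into the page table at @level for guest physical address @addr.
 * The root level uses stage2_pgd_xbits extra index bits because the
 * stage2 root page table is four times the size of a regular page table.
 */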
static inline unsigned long stage2_pte_index(gpa_t addr, u32 level)
{
	unsigned long mask;
	unsigned long shift = HGATP_PAGE_SHIFT + (stage2_index_bits * level);

	if (level == (stage2_pgd_levels - 1))
		mask = (PTRS_PER_PTE * (1UL << stage2_pgd_xbits)) - 1;
	else
		mask = PTRS_PER_PTE - 1;

	return (addr >> shift) & mask;
}

static inline unsigned long stage2_pte_page_vaddr(pte_t pte)
{
	return (unsigned long)pfn_to_virt(pte_val(pte) >> _PAGE_PFN_SHIFT);
}

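/*
 * Convert between a mapping size and its page table level. Level 0 is a
 * 4KB page; each higher level multiplies the size by 2^stage2_index_bits
 * (512 on 64-bit, 1024 on 32-bit).
 */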
static int stage2_page_size_to_level(unsigned long page_size, u32 *out_level)
{
	u32 i;
	unsigned long psz = 1UL << 12;

	for (i = 0; i < stage2_pgd_levels; i++) {
		if (page_size == (psz << (i * stage2_index_bits))) {
			*out_level = i;
			return 0;
		}
	}

	return -EINVAL;
}

static int stage2_level_to_page_size(u32 level, unsigned long *out_pgsize)
{
	if (stage2_pgd_levels < level)
		return -EINVAL;

	*out_pgsize = 1UL << (12 + (level * stage2_index_bits));

	return 0;
}

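/*
 * Walk the stage2 page table rooted at kvm->arch.pgd and, if @addr is
 * mapped, return the leaf PTE and its level via @ptepp and @ptep_level.
 * Returns true when a leaf entry was found, false otherwise.
 */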
static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr,
				  pte_t **ptepp, u32 *ptep_level)
{
	pte_t *ptep;
	u32 current_level = stage2_pgd_levels - 1;

	*ptep_level = current_level;
	ptep = (pte_t *)kvm->arch.pgd;
	ptep = &ptep[stage2_pte_index(addr, current_level)];
	while (ptep && pte_val(*ptep)) {
		if (stage2_pte_leaf(ptep)) {
			*ptep_level = current_level;
			*ptepp = ptep;
			return true;
		}

		if (current_level) {
			current_level--;
			*ptep_level = current_level;
			ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
			ptep = &ptep[stage2_pte_index(addr, current_level)];
		} else {
			ptep = NULL;
		}
	}

	return false;
}

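/*
 * Flush the naturally aligned region of @level page size containing @addr
 * from remote TLBs, using an SBI HFENCE.GVMA request scoped to this VM's
 * VMID.
 */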
static void stage2_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
{
	struct cpumask hmask;
	unsigned long size = PAGE_SIZE;
	struct kvm_vmid *vmid = &kvm->arch.vmid;

	if (stage2_level_to_page_size(level, &size))
		return;
	addr &= ~(size - 1);

	/*
	 * TODO: Instead of cpu_online_mask, we should only target CPUs
	 * where the Guest/VM is running.
	 */
	preempt_disable();
	riscv_cpuid_to_hartid_mask(cpu_online_mask, &hmask);
	sbi_remote_hfence_gvma_vmid(cpumask_bits(&hmask), addr, size,
				    READ_ONCE(vmid->vmid));
	preempt_enable();
}

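/*
 * Install @new_pte at @level for guest physical address @addr, allocating
 * intermediate page tables from @pcache as needed. Fails with -EEXIST if
 * a larger leaf mapping already covers @addr.
 */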
static int stage2_set_pte(struct kvm *kvm, u32 level,
			  struct kvm_mmu_memory_cache *pcache,
			  gpa_t addr, const pte_t *new_pte)
{
	u32 current_level = stage2_pgd_levels - 1;
	pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
	pte_t *ptep = &next_ptep[stage2_pte_index(addr, current_level)];

	if (current_level < level)
		return -EINVAL;

	while (current_level != level) {
		if (stage2_pte_leaf(ptep))
			return -EEXIST;

		if (!pte_val(*ptep)) {
			if (!pcache)
				return -ENOMEM;
			next_ptep = kvm_mmu_memory_cache_alloc(pcache);
			if (!next_ptep)
				return -ENOMEM;
			*ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
					__pgprot(_PAGE_TABLE));
		} else {
			if (stage2_pte_leaf(ptep))
				return -EEXIST;
			next_ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
		}

		current_level--;
		ptep = &next_ptep[stage2_pte_index(addr, current_level)];
	}

	*ptep = *new_pte;
	if (stage2_pte_leaf(ptep))
		stage2_remote_tlb_flush(kvm, current_level, addr);

	return 0;
}

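/*
 * Map guest physical address @gpa to host physical address @hpa with the
 * given @page_size and permissions, then install the PTE via
 * stage2_set_pte(). The PTE is created with the 'A' and 'D' bits set
 * (see the comment below on hardware vs. software updates of these bits).
 */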
static int stage2_map_page(struct kvm *kvm,
			   struct kvm_mmu_memory_cache *pcache,
			   gpa_t gpa, phys_addr_t hpa,
			   unsigned long page_size,
			   bool page_rdonly, bool page_exec)
{
	int ret;
	u32 level = 0;
	pte_t new_pte;
	pgprot_t prot;

	ret = stage2_page_size_to_level(page_size, &level);
	if (ret)
		return ret;

	/*
	 * A RISC-V implementation can choose to either:
	 * 1) Update 'A' and 'D' PTE bits in hardware
	 * 2) Generate page fault when 'A' and/or 'D' bits are not set
	 *    PTE so that software can update these bits.
	 *
	 * We support both options mentioned above. To achieve this, we
	 * always set 'A' and 'D' PTE bits at time of creating stage2
	 * mapping. To support KVM dirty page logging with both options
	 * mentioned above, we will write-protect stage2 PTEs to track
	 * dirty pages.
	 */

	if (page_exec) {
		if (page_rdonly)
			prot = PAGE_READ_EXEC;
		else
			prot = PAGE_WRITE_EXEC;
	} else {
		if (page_rdonly)
			prot = PAGE_READ;
		else
			prot = PAGE_WRITE;
	}
	new_pte = pfn_pte(PFN_DOWN(hpa), prot);
	new_pte = pte_mkdirty(new_pte);

	return stage2_set_pte(kvm, level, pcache, gpa, &new_pte);
}

enum stage2_op {
	STAGE2_OP_NOP = 0,	/* Nothing */
	STAGE2_OP_CLEAR,	/* Clear/Unmap */
	STAGE2_OP_WP,		/* Write-protect */
};

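/*
 * Apply @op (clear or write-protect) to the PTE at @ptep and, for non-leaf
 * entries, recurse into every entry of the child page table below it,
 * flushing the remote TLBs for the affected mappings.
 */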
static void stage2_op_pte(struct kvm *kvm, gpa_t addr,
			  pte_t *ptep, u32 ptep_level, enum stage2_op op)
{
	int i, ret;
	pte_t *next_ptep;
	u32 next_ptep_level;
	unsigned long next_page_size, page_size;

	ret = stage2_level_to_page_size(ptep_level, &page_size);
	if (ret)
		return;

	BUG_ON(addr & (page_size - 1));

	if (!pte_val(*ptep))
		return;

	if (ptep_level && !stage2_pte_leaf(ptep)) {
		next_ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
		next_ptep_level = ptep_level - 1;
		ret = stage2_level_to_page_size(next_ptep_level,
						&next_page_size);
		if (ret)
			return;

		if (op == STAGE2_OP_CLEAR)
			set_pte(ptep, __pte(0));
		for (i = 0; i < PTRS_PER_PTE; i++)
			stage2_op_pte(kvm, addr + i * next_page_size,
				      &next_ptep[i], next_ptep_level, op);
		if (op == STAGE2_OP_CLEAR)
			put_page(virt_to_page(next_ptep));
	} else {
		if (op == STAGE2_OP_CLEAR)
			set_pte(ptep, __pte(0));
		else if (op == STAGE2_OP_WP)
			set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
		stage2_remote_tlb_flush(kvm, ptep_level, addr);
	}
}

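/*
 * Unmap the guest physical range [@start, @start + @size), dropping the
 * mmu_lock periodically when @may_block is true so that large teardowns
 * do not trigger lockup detector warnings.
 */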
static void stage2_unmap_range(struct kvm *kvm, gpa_t start,
			       gpa_t size, bool may_block)
{
	int ret;
	pte_t *ptep;
	u32 ptep_level;
	bool found_leaf;
	unsigned long page_size;
	gpa_t addr = start, end = start + size;

	while (addr < end) {
		found_leaf = stage2_get_leaf_entry(kvm, addr,
						   &ptep, &ptep_level);
		ret = stage2_level_to_page_size(ptep_level, &page_size);
		if (ret)
			break;

		if (!found_leaf)
			goto next;

		if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
			stage2_op_pte(kvm, addr, ptep,
				      ptep_level, STAGE2_OP_CLEAR);

next:
		addr += page_size;

		/*
		 * If the range is too large, release the kvm->mmu_lock
		 * to prevent starvation and lockup detector warnings.
		 */
		if (may_block && addr < end)
			cond_resched_lock(&kvm->mmu_lock);
	}
}

static void stage2_wp_range(struct kvm *kvm, gpa_t start, gpa_t end)
{
	int ret;
	pte_t *ptep;
	u32 ptep_level;
	bool found_leaf;
	gpa_t addr = start;
	unsigned long page_size;

	while (addr < end) {
		found_leaf = stage2_get_leaf_entry(kvm, addr,
						   &ptep, &ptep_level);
		ret = stage2_level_to_page_size(ptep_level, &page_size);
		if (ret)
			break;

		if (!found_leaf)
			goto next;

		if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
			stage2_op_pte(kvm, addr, ptep,
				      ptep_level, STAGE2_OP_WP);

next:
		addr += page_size;
	}
}

static void stage2_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_wp_range(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);
}

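/*
 * Map a host physical (typically device/IO) region into the guest physical
 * address space one 4KB page at a time, using a local page table cache for
 * intermediate levels.
 */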
static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
			  unsigned long size, bool writable)
{
	pte_t pte;
	int ret = 0;
	unsigned long pfn;
	phys_addr_t addr, end;
	struct kvm_mmu_memory_cache pcache;

	memset(&pcache, 0, sizeof(pcache));
	pcache.gfp_zero = __GFP_ZERO;

	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(hpa);

	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
		pte = pfn_pte(pfn, PAGE_KERNEL);

		if (!writable)
			pte = pte_wrprotect(pte);

		ret = kvm_mmu_topup_memory_cache(&pcache, stage2_pgd_levels);
		if (ret)
			goto out;

		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, 0, &pcache, addr, &pte);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	kvm_mmu_free_memory_cache(&pcache);
	return ret;
}

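/*
 * Write-protect the pages selected by @mask within the memslot so that
 * subsequent guest writes fault and get recorded in the dirty log.
 */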
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

	stage2_wp_range(kvm, start, end);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot)
{
	kvm_flush_remote_tlbs(kvm);
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free)
{
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_riscv_stage2_free_pgd(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_unmap_range(kvm, gpa, size, false);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	/*
	 * At this point memslot has been committed and there is an
	 * allocated dirty_bitmap[], dirty pages will be tracked while
	 * the memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES)
		stage2_wp_memory_region(kvm, new->id);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	hva_t hva, reg_end, size;
	gpa_t base_gpa;
	bool writable;
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
	    change != KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the GPA
	 * space addressable by the KVM guest GPA space.
	 */
	if ((new->base_gfn + new->npages) >=
	    (stage2_gpa_size >> PAGE_SHIFT))
		return -EFAULT;

	hva = new->userspace_addr;
	size = new->npages << PAGE_SHIFT;
	reg_end = hva + size;
	base_gpa = new->base_gfn << PAGE_SHIFT;
	writable = !(new->flags & KVM_MEM_READONLY);

	mmap_read_lock(current->mm);

	/*
	 * A memory region could potentially cover multiple VMAs, and
	 * any holes between them, so iterate over all of them to find
	 * out if we can map any of them right now.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Mapping a read-only VMA is only allowed if the
		 * memory region is configured as read-only.
		 */
		if (writable && !(vma->vm_flags & VM_WRITE)) {
			ret = -EPERM;
			break;
		}

		/* Take the intersection of this VMA with the memory region */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (vma->vm_flags & VM_PFNMAP) {
			gpa_t gpa = base_gpa + (vm_start - hva);
			phys_addr_t pa;

			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
			pa += vm_start - vma->vm_start;

			/* IO region dirty page logging not allowed */
			if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
				ret = -EINVAL;
				goto out;
			}

			ret = stage2_ioremap(kvm, gpa, pa,
					     vm_end - vm_start, writable);
			if (ret)
				break;
		}
		hva = vm_end;
	} while (hva < reg_end);

	if (change == KVM_MR_FLAGS_ONLY)
		goto out;

	spin_lock(&kvm->mmu_lock);
	if (ret)
		stage2_unmap_range(kvm, base_gpa, size, false);
	spin_unlock(&kvm->mmu_lock);

out:
	mmap_read_unlock(current->mm);
	return ret;
}

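/*
 * Callbacks invoked through KVM's common MMU notifier machinery: unmap,
 * update, and age stage2 mappings when the host userspace mappings that
 * back guest memory change.
 */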
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (!kvm->arch.pgd)
		return false;

	stage2_unmap_range(kvm, range->start << PAGE_SHIFT,
			   (range->end - range->start) << PAGE_SHIFT,
			   range->may_block);
	return false;
}

bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	int ret;
	kvm_pfn_t pfn = pte_pfn(range->pte);

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(range->end - range->start != 1);

	ret = stage2_map_page(kvm, NULL, range->start << PAGE_SHIFT,
			      __pfn_to_phys(pfn), PAGE_SIZE, true, true);
	if (ret) {
		kvm_debug("Failed to map stage2 page (error %d)\n", ret);
		return true;
	}

	return false;
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

	if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
				   &ptep, &ptep_level))
		return false;

	return ptep_test_and_clear_young(NULL, 0, ptep);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

	if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
				   &ptep, &ptep_level))
		return false;

	return pte_young(*ptep);
}

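/*
 * Handle a stage2 fault for guest physical address @gpa: resolve the
 * backing host page via gfn_to_pfn_prot() and create the stage2 mapping,
 * downgrading it to read-only when dirty logging is active and the fault
 * is not a write.
 */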
int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write)
{
	int ret;
	kvm_pfn_t hfn;
	bool writeable;
	short vma_pageshift;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
	bool logging = (memslot->dirty_bitmap &&
			!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
	unsigned long vma_pagesize, mmu_seq;

	mmap_read_lock(current->mm);

	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		mmap_read_unlock(current->mm);
		return -EFAULT;
	}

	if (is_vm_hugetlb_page(vma))
		vma_pageshift = huge_page_shift(hstate_vma(vma));
	else
		vma_pageshift = PAGE_SHIFT;
	vma_pagesize = 1ULL << vma_pageshift;
	if (logging || (vma->vm_flags & VM_PFNMAP))
		vma_pagesize = PAGE_SIZE;

	if (vma_pagesize == PMD_SIZE || vma_pagesize == PGDIR_SIZE)
		gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;

	mmap_read_unlock(current->mm);

	if (vma_pagesize != PGDIR_SIZE &&
	    vma_pagesize != PMD_SIZE &&
	    vma_pagesize != PAGE_SIZE) {
		kvm_err("Invalid VMA page size 0x%lx\n", vma_pagesize);
		return -EFAULT;
	}

	/* We need minimum second+third level pages */
	ret = kvm_mmu_topup_memory_cache(pcache, stage2_pgd_levels);
	if (ret) {
		kvm_err("Failed to topup stage2 cache\n");
		return ret;
	}

	mmu_seq = kvm->mmu_notifier_seq;

	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable);
	if (hfn == KVM_PFN_ERR_HWPOISON) {
		send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
				vma_pageshift, current);
		return 0;
	}
	if (is_error_noslot_pfn(hfn))
		return -EFAULT;

	/*
	 * If logging is active then we allow writable pages only
	 * for write faults.
	 */
	if (logging && !is_write)
		writeable = false;

	spin_lock(&kvm->mmu_lock);

	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	if (writeable) {
		kvm_set_pfn_dirty(hfn);
		mark_page_dirty(kvm, gfn);
		ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
				      vma_pagesize, false, true);
	} else {
		ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
				      vma_pagesize, true, true);
	}

	if (ret)
		kvm_err("Failed to map in stage2\n");

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_set_pfn_accessed(hfn);
	kvm_release_pfn_clean(hfn);
	return ret;
}

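/*
 * Allocate and zero the stage2 root page table (stage2_pgd_size bytes,
 * i.e. four pages) for a new VM.
 */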
int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm)
{
	struct page *pgd_page;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
			       get_order(stage2_pgd_size));
	if (!pgd_page)
		return -ENOMEM;
	kvm->arch.pgd = page_to_virt(pgd_page);
	kvm->arch.pgd_phys = page_to_phys(pgd_page);

	return 0;
}

void kvm_riscv_stage2_free_pgd(struct kvm *kvm)
{
	void *pgd = NULL;

	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.pgd) {
		stage2_unmap_range(kvm, 0UL, stage2_gpa_size, false);
		pgd = READ_ONCE(kvm->arch.pgd);
		kvm->arch.pgd = NULL;
		kvm->arch.pgd_phys = 0;
	}
	spin_unlock(&kvm->mmu_lock);

	if (pgd)
		free_pages((unsigned long)pgd, get_order(stage2_pgd_size));
}

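/*
 * Program the HGATP CSR with this VM's stage2 mode, VMID, and root page
 * table, then flush guest TLB mappings if VMIDs are not available.
 */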
void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu)
{
	unsigned long hgatp = stage2_mode;
	struct kvm_arch *k = &vcpu->kvm->arch;

	hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) &
		 HGATP_VMID_MASK;
	hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN;

	csr_write(CSR_HGATP, hgatp);

	if (!kvm_riscv_stage2_vmid_bits())
		__kvm_riscv_hfence_gvma_all();
}

void kvm_riscv_stage2_mode_detect(void)
{
#ifdef CONFIG_64BIT
	/* Try Sv48x4 stage2 mode */
	csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
	if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) {
		stage2_mode = (HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
		stage2_pgd_levels = 4;
	}
	csr_write(CSR_HGATP, 0);

	__kvm_riscv_hfence_gvma_all();
#endif
}

unsigned long kvm_riscv_stage2_mode(void)
{
	return stage2_mode >> HGATP_MODE_SHIFT;
}