// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/sched/signal.h>
#include <asm/csr.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sbi.h>

#ifdef CONFIG_64BIT
static unsigned long stage2_mode = (HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT);
static unsigned long stage2_pgd_levels = 3;
#define stage2_index_bits	9
#else
static unsigned long stage2_mode = (HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT);
static unsigned long stage2_pgd_levels = 2;
#define stage2_index_bits	10
#endif

#define stage2_pgd_xbits	2
#define stage2_pgd_size	(1UL << (HGATP_PAGE_SHIFT + stage2_pgd_xbits))
#define stage2_gpa_bits	(HGATP_PAGE_SHIFT + \
			 (stage2_pgd_levels * stage2_index_bits) + \
			 stage2_pgd_xbits)
#define stage2_gpa_size	((gpa_t)(1ULL << stage2_gpa_bits))

#define stage2_pte_leaf(__ptep)	\
	(pte_val(*(__ptep)) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC))

static inline unsigned long stage2_pte_index(gpa_t addr, u32 level)
{
	unsigned long mask;
	unsigned long shift = HGATP_PAGE_SHIFT + (stage2_index_bits * level);

	if (level == (stage2_pgd_levels - 1))
		mask = (PTRS_PER_PTE * (1UL << stage2_pgd_xbits)) - 1;
	else
		mask = PTRS_PER_PTE - 1;

	return (addr >> shift) & mask;
}

static inline unsigned long stage2_pte_page_vaddr(pte_t pte)
{
	return (unsigned long)pfn_to_virt(pte_val(pte) >> _PAGE_PFN_SHIFT);
}

static int stage2_page_size_to_level(unsigned long page_size, u32 *out_level)
{
	u32 i;
	unsigned long psz = 1UL << 12;

	for (i = 0; i < stage2_pgd_levels; i++) {
		if (page_size == (psz << (i * stage2_index_bits))) {
			*out_level = i;
			return 0;
		}
	}

	return -EINVAL;
}

static int stage2_level_to_page_size(u32 level, unsigned long *out_pgsize)
{
	if (stage2_pgd_levels < level)
		return -EINVAL;

	*out_pgsize = 1UL << (12 + (level * stage2_index_bits));

	return 0;
}

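/*
 * Walk the stage2 page table from the root for @addr and return the
 * leaf PTE (and its level) that maps it, or false if no leaf mapping
 * exists.
 */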
static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr,
				  pte_t **ptepp, u32 *ptep_level)
{
	pte_t *ptep;
	u32 current_level = stage2_pgd_levels - 1;

	*ptep_level = current_level;
	ptep = (pte_t *)kvm->arch.pgd;
	ptep = &ptep[stage2_pte_index(addr, current_level)];
	while (ptep && pte_val(*ptep)) {
		if (stage2_pte_leaf(ptep)) {
			*ptep_level = current_level;
			*ptepp = ptep;
			return true;
		}

		if (current_level) {
			current_level--;
			*ptep_level = current_level;
			ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
			ptep = &ptep[stage2_pte_index(addr, current_level)];
		} else {
			ptep = NULL;
		}
	}

	return false;
}

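/*
 * Invalidate guest-physical TLB entries for the page mapped at @addr
 * (at the given page-table level) on all online CPUs using a remote
 * HFENCE.GVMA for this VM's VMID.
 */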
static void stage2_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
{
	unsigned long size = PAGE_SIZE;
	struct kvm_vmid *vmid = &kvm->arch.vmid;

	if (stage2_level_to_page_size(level, &size))
		return;
	addr &= ~(size - 1);

	/*
	 * TODO: Instead of cpu_online_mask, we should only target CPUs
	 * where the Guest/VM is running.
	 */
	preempt_disable();
	sbi_remote_hfence_gvma_vmid(cpu_online_mask, addr, size,
				    READ_ONCE(vmid->vmid));
	preempt_enable();
}

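/*
 * Install @new_pte at the requested @level for @addr, allocating any
 * missing intermediate page-table pages from @pcache and flushing
 * remote TLBs when a leaf mapping is written.
 */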
static int stage2_set_pte(struct kvm *kvm, u32 level,
			  struct kvm_mmu_memory_cache *pcache,
			  gpa_t addr, const pte_t *new_pte)
{
	u32 current_level = stage2_pgd_levels - 1;
	pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
	pte_t *ptep = &next_ptep[stage2_pte_index(addr, current_level)];

	if (current_level < level)
		return -EINVAL;

	while (current_level != level) {
		if (stage2_pte_leaf(ptep))
			return -EEXIST;

		if (!pte_val(*ptep)) {
			if (!pcache)
				return -ENOMEM;
			next_ptep = kvm_mmu_memory_cache_alloc(pcache);
			if (!next_ptep)
				return -ENOMEM;
			*ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
					__pgprot(_PAGE_TABLE));
		} else {
			if (stage2_pte_leaf(ptep))
				return -EEXIST;
			next_ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
		}

		current_level--;
		ptep = &next_ptep[stage2_pte_index(addr, current_level)];
	}

	*ptep = *new_pte;
	if (stage2_pte_leaf(ptep))
		stage2_remote_tlb_flush(kvm, current_level, addr);

	return 0;
}

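/*
 * Build a leaf PTE for the GPA->HPA mapping with the requested page
 * size and access permissions, then install it via stage2_set_pte().
 */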
static int stage2_map_page(struct kvm *kvm,
			   struct kvm_mmu_memory_cache *pcache,
			   gpa_t gpa, phys_addr_t hpa,
			   unsigned long page_size,
			   bool page_rdonly, bool page_exec)
{
	int ret;
	u32 level = 0;
	pte_t new_pte;
	pgprot_t prot;

	ret = stage2_page_size_to_level(page_size, &level);
	if (ret)
		return ret;

	/*
	 * A RISC-V implementation can choose to either:
	 * 1) Update 'A' and 'D' PTE bits in hardware
	 * 2) Generate page fault when 'A' and/or 'D' bits are not set in a
	 *    PTE so that software can update these bits.
	 *
	 * We support both options mentioned above. To achieve this, we
	 * always set 'A' and 'D' PTE bits at time of creating stage2
	 * mapping. To support KVM dirty page logging with both options
	 * mentioned above, we will write-protect stage2 PTEs to track
	 * dirty pages.
	 */

	if (page_exec) {
		if (page_rdonly)
			prot = PAGE_READ_EXEC;
		else
			prot = PAGE_WRITE_EXEC;
	} else {
		if (page_rdonly)
			prot = PAGE_READ;
		else
			prot = PAGE_WRITE;
	}
	new_pte = pfn_pte(PFN_DOWN(hpa), prot);
	new_pte = pte_mkdirty(new_pte);

	return stage2_set_pte(kvm, level, pcache, gpa, &new_pte);
}

enum stage2_op {
	STAGE2_OP_NOP = 0,	/* Nothing */
	STAGE2_OP_CLEAR,	/* Clear/Unmap */
	STAGE2_OP_WP,		/* Write-protect */
};

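/*
 * Apply @op (clear or write-protect) to the PTE at @ptep, recursing
 * into lower-level page tables when the entry is not a leaf.
 */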
static void stage2_op_pte(struct kvm *kvm, gpa_t addr,
			  pte_t *ptep, u32 ptep_level, enum stage2_op op)
{
	int i, ret;
	pte_t *next_ptep;
	u32 next_ptep_level;
	unsigned long next_page_size, page_size;

	ret = stage2_level_to_page_size(ptep_level, &page_size);
	if (ret)
		return;

	BUG_ON(addr & (page_size - 1));

	if (!pte_val(*ptep))
		return;

	if (ptep_level && !stage2_pte_leaf(ptep)) {
		next_ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
		next_ptep_level = ptep_level - 1;
		ret = stage2_level_to_page_size(next_ptep_level,
						&next_page_size);
		if (ret)
			return;

		if (op == STAGE2_OP_CLEAR)
			set_pte(ptep, __pte(0));
		for (i = 0; i < PTRS_PER_PTE; i++)
			stage2_op_pte(kvm, addr + i * next_page_size,
				      &next_ptep[i], next_ptep_level, op);
		if (op == STAGE2_OP_CLEAR)
			put_page(virt_to_page(next_ptep));
	} else {
		if (op == STAGE2_OP_CLEAR)
			set_pte(ptep, __pte(0));
		else if (op == STAGE2_OP_WP)
			set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
		stage2_remote_tlb_flush(kvm, ptep_level, addr);
	}
}

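/* Unmap all stage2 leaf mappings in the GPA range [start, start + size). */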
static void stage2_unmap_range(struct kvm *kvm, gpa_t start,
			       gpa_t size, bool may_block)
{
	int ret;
	pte_t *ptep;
	u32 ptep_level;
	bool found_leaf;
	unsigned long page_size;
	gpa_t addr = start, end = start + size;

	while (addr < end) {
		found_leaf = stage2_get_leaf_entry(kvm, addr,
						   &ptep, &ptep_level);
		ret = stage2_level_to_page_size(ptep_level, &page_size);
		if (ret)
			break;

		if (!found_leaf)
			goto next;

		if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
			stage2_op_pte(kvm, addr, ptep,
				      ptep_level, STAGE2_OP_CLEAR);

next:
		addr += page_size;

		/*
		 * If the range is too large, release the kvm->mmu_lock
		 * to prevent starvation and lockup detector warnings.
		 */
		if (may_block && addr < end)
			cond_resched_lock(&kvm->mmu_lock);
	}
}

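/* Write-protect all stage2 leaf mappings in the GPA range [start, end). */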
static void stage2_wp_range(struct kvm *kvm, gpa_t start, gpa_t end)
{
	int ret;
	pte_t *ptep;
	u32 ptep_level;
	bool found_leaf;
	gpa_t addr = start;
	unsigned long page_size;

	while (addr < end) {
		found_leaf = stage2_get_leaf_entry(kvm, addr,
						   &ptep, &ptep_level);
		ret = stage2_level_to_page_size(ptep_level, &page_size);
		if (ret)
			break;

		if (!found_leaf)
			goto next;

		if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
			stage2_op_pte(kvm, addr, ptep,
				      ptep_level, STAGE2_OP_WP);

next:
		addr += page_size;
	}
}

static void stage2_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_wp_range(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);
}

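/*
 * Map a host physical I/O region into the guest physical address space
 * using PAGE_SIZE mappings, optionally write-protected.
 */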
static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
			  unsigned long size, bool writable)
{
	pte_t pte;
	int ret = 0;
	unsigned long pfn;
	phys_addr_t addr, end;
	struct kvm_mmu_memory_cache pcache;

	memset(&pcache, 0, sizeof(pcache));
	pcache.gfp_zero = __GFP_ZERO;

	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(hpa);

	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
		pte = pfn_pte(pfn, PAGE_KERNEL);

		if (!writable)
			pte = pte_wrprotect(pte);

		ret = kvm_mmu_topup_memory_cache(&pcache, stage2_pgd_levels);
		if (ret)
			goto out;

		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, 0, &pcache, addr, &pte);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	kvm_mmu_free_memory_cache(&pcache);
	return ret;
}

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

	stage2_wp_range(kvm, start, end);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot)
{
	kvm_flush_remote_tlbs(kvm);
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free)
{
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_riscv_stage2_free_pgd(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_unmap_range(kvm, gpa, size, false);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	/*
	 * At this point memslot has been committed and there is an
	 * allocated dirty_bitmap[], dirty pages will be tracked while
	 * the memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES)
		stage2_wp_memory_region(kvm, new->id);
}

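/*
 * Validate a new or moved memory region against the stage2 GPA space
 * and eagerly map any VM_PFNMAP (I/O) VMAs it covers; regular RAM is
 * mapped lazily on stage2 page faults.
 */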
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	hva_t hva, reg_end, size;
	gpa_t base_gpa;
	bool writable;
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
	    change != KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the
	 * GPA space addressable by the KVM guest.
	 */
	if ((new->base_gfn + new->npages) >=
	    (stage2_gpa_size >> PAGE_SHIFT))
		return -EFAULT;

	hva = new->userspace_addr;
	size = new->npages << PAGE_SHIFT;
	reg_end = hva + size;
	base_gpa = new->base_gfn << PAGE_SHIFT;
	writable = !(new->flags & KVM_MEM_READONLY);

	mmap_read_lock(current->mm);

	/*
	 * A memory region could potentially cover multiple VMAs, and
	 * any holes between them, so iterate over all of them to find
	 * out if we can map any of them right now.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Mapping a read-only VMA is only allowed if the
		 * memory region is configured as read-only.
		 */
		if (writable && !(vma->vm_flags & VM_WRITE)) {
			ret = -EPERM;
			break;
		}

		/* Take the intersection of this VMA with the memory region */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (vma->vm_flags & VM_PFNMAP) {
			gpa_t gpa = base_gpa + (vm_start - hva);
			phys_addr_t pa;

			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
			pa += vm_start - vma->vm_start;

			/* IO region dirty page logging not allowed */
			if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
				ret = -EINVAL;
				goto out;
			}

			ret = stage2_ioremap(kvm, gpa, pa,
					     vm_end - vm_start, writable);
			if (ret)
				break;
		}
		hva = vm_end;
	} while (hva < reg_end);

	if (change == KVM_MR_FLAGS_ONLY)
		goto out;

	spin_lock(&kvm->mmu_lock);
	if (ret)
		stage2_unmap_range(kvm, base_gpa, size, false);
	spin_unlock(&kvm->mmu_lock);

out:
	mmap_read_unlock(current->mm);
	return ret;
}

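/*
 * KVM MMU notifier callback: drop stage2 mappings for a GFN range whose
 * backing host memory is being invalidated.
 */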
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (!kvm->arch.pgd)
		return false;

	stage2_unmap_range(kvm, range->start << PAGE_SHIFT,
			   (range->end - range->start) << PAGE_SHIFT,
			   range->may_block);
	return false;
}

bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	int ret;
	kvm_pfn_t pfn = pte_pfn(range->pte);

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(range->end - range->start != 1);

	ret = stage2_map_page(kvm, NULL, range->start << PAGE_SHIFT,
			      __pfn_to_phys(pfn), PAGE_SIZE, true, true);
	if (ret) {
		kvm_debug("Failed to map stage2 page (error %d)\n", ret);
		return true;
	}

	return false;
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

	if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
				   &ptep, &ptep_level))
		return false;

	return ptep_test_and_clear_young(NULL, 0, ptep);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

	if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
				   &ptep, &ptep_level))
		return false;

	return pte_young(*ptep);
}

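/*
 * Handle a stage2 (guest-physical) page fault: resolve the faulting GPA
 * to a host page and create the stage2 mapping, honouring read-only
 * memslots and dirty logging when choosing the mapping permissions.
 */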
int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write)
{
	int ret;
	kvm_pfn_t hfn;
	bool writeable;
	short vma_pageshift;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
	bool logging = (memslot->dirty_bitmap &&
			!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
	unsigned long vma_pagesize, mmu_seq;

	mmap_read_lock(current->mm);

	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		mmap_read_unlock(current->mm);
		return -EFAULT;
	}

	if (is_vm_hugetlb_page(vma))
		vma_pageshift = huge_page_shift(hstate_vma(vma));
	else
		vma_pageshift = PAGE_SHIFT;
	vma_pagesize = 1ULL << vma_pageshift;
	if (logging || (vma->vm_flags & VM_PFNMAP))
		vma_pagesize = PAGE_SIZE;

	if (vma_pagesize == PMD_SIZE || vma_pagesize == PGDIR_SIZE)
		gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;

	mmap_read_unlock(current->mm);

	if (vma_pagesize != PGDIR_SIZE &&
	    vma_pagesize != PMD_SIZE &&
	    vma_pagesize != PAGE_SIZE) {
		kvm_err("Invalid VMA page size 0x%lx\n", vma_pagesize);
		return -EFAULT;
	}

	/* We need minimum second+third level pages */
	ret = kvm_mmu_topup_memory_cache(pcache, stage2_pgd_levels);
	if (ret) {
		kvm_err("Failed to topup stage2 cache\n");
		return ret;
	}

	mmu_seq = kvm->mmu_notifier_seq;

	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable);
	if (hfn == KVM_PFN_ERR_HWPOISON) {
		send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
				vma_pageshift, current);
		return 0;
	}
	if (is_error_noslot_pfn(hfn))
		return -EFAULT;

	/*
	 * If logging is active then we allow writable pages only
	 * for write faults.
	 */
	if (logging && !is_write)
		writeable = false;

	spin_lock(&kvm->mmu_lock);

	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	if (writeable) {
		kvm_set_pfn_dirty(hfn);
		mark_page_dirty(kvm, gfn);
		ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
				      vma_pagesize, false, true);
	} else {
		ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
				      vma_pagesize, true, true);
	}

	if (ret)
		kvm_err("Failed to map in stage2\n");

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_set_pfn_accessed(hfn);
	kvm_release_pfn_clean(hfn);
	return ret;
}

int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm)
{
	struct page *pgd_page;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
				get_order(stage2_pgd_size));
	if (!pgd_page)
		return -ENOMEM;
	kvm->arch.pgd = page_to_virt(pgd_page);
	kvm->arch.pgd_phys = page_to_phys(pgd_page);

	return 0;
}

void kvm_riscv_stage2_free_pgd(struct kvm *kvm)
{
	void *pgd = NULL;

	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.pgd) {
		stage2_unmap_range(kvm, 0UL, stage2_gpa_size, false);
		pgd = READ_ONCE(kvm->arch.pgd);
		kvm->arch.pgd = NULL;
		kvm->arch.pgd_phys = 0;
	}
	spin_unlock(&kvm->mmu_lock);

	if (pgd)
		free_pages((unsigned long)pgd, get_order(stage2_pgd_size));
}

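/*
 * Program the hgatp CSR with the current stage2 mode, the VM's VMID,
 * and the stage2 page table base for this vCPU.
 */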
void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu)
{
	unsigned long hgatp = stage2_mode;
	struct kvm_arch *k = &vcpu->kvm->arch;

	hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) &
		 HGATP_VMID_MASK;
	hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN;

	csr_write(CSR_HGATP, hgatp);

	if (!kvm_riscv_stage2_vmid_bits())
		__kvm_riscv_hfence_gvma_all();
}

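/*
 * Probe whether the hardware supports Sv48x4 stage2 translation and,
 * if so, switch the default mode up from Sv39x4; hgatp is restored to
 * zero afterwards.
 */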
void kvm_riscv_stage2_mode_detect(void)
{
#ifdef CONFIG_64BIT
	/* Try Sv48x4 stage2 mode */
	csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
	if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) {
		stage2_mode = (HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
		stage2_pgd_levels = 4;
	}
	csr_write(CSR_HGATP, 0);

	__kvm_riscv_hfence_gvma_all();
#endif
}

unsigned long kvm_riscv_stage2_mode(void)
{
	return stage2_mode >> HGATP_MODE_SHIFT;
}

int kvm_riscv_stage2_gpa_bits(void)
{
	return stage2_gpa_bits;
}