// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>

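/* No RISC-V specific ioctls are implemented on the /dev/kvm device. */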
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

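/*
 * Per-CPU virtualization enable: program the H-extension delegation
 * CSRs so that common guest exceptions and VS-level interrupts are
 * handled in VS-mode without trapping to HS-mode.
 */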
int kvm_arch_hardware_enable(void)
{
	unsigned long hideleg, hedeleg;

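	/* Delegate guest exceptions that VS-mode can handle directly. */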
	hedeleg = 0;
	hedeleg |= (1UL << EXC_INST_MISALIGNED);
	hedeleg |= (1UL << EXC_BREAKPOINT);
	hedeleg |= (1UL << EXC_SYSCALL);
	hedeleg |= (1UL << EXC_INST_PAGE_FAULT);
	hedeleg |= (1UL << EXC_LOAD_PAGE_FAULT);
	hedeleg |= (1UL << EXC_STORE_PAGE_FAULT);
	csr_write(CSR_HEDELEG, hedeleg);

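	/* Delegate the VS-level software, timer, and external interrupts. */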
	hideleg = 0;
	hideleg |= (1UL << IRQ_VS_SOFT);
	hideleg |= (1UL << IRQ_VS_TIMER);
	hideleg |= (1UL << IRQ_VS_EXT);
	csr_write(CSR_HIDELEG, hideleg);

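	/* Permit guest access to all counter CSRs. */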
	csr_write(CSR_HCOUNTEREN, -1UL);

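	/* Clear any pending virtual interrupts before guests run. */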
	csr_write(CSR_HVIP, 0);

	return 0;
}

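/* Per-CPU virtualization disable: revert kvm_arch_hardware_enable(). */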
59void kvm_arch_hardware_disable(void)
60{
Vincent Chen33e5b572021-12-27 11:05:14 +080061 /*
62 * After clearing the hideleg CSR, the host kernel will receive
63 * spurious interrupts if hvip CSR has pending interrupts and the
64 * corresponding enable bits in vsie CSR are asserted. To avoid it,
65 * hvip CSR and vsie CSR must be cleared before clearing hideleg CSR.
66 */
67 csr_write(CSR_VSIE, 0);
68 csr_write(CSR_HVIP, 0);
Anup Patel99cdc6c2021-09-27 17:10:01 +053069 csr_write(CSR_HEDELEG, 0);
70 csr_write(CSR_HIDELEG, 0);
71}
72
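/*
 * One-time module initialization: check that the host provides what
 * KVM needs (H-extension, SBI v0.2+ with the RFENCE extension), then
 * detect the stage-2 (G-stage) translation mode and the VMID width.
 */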
int kvm_arch_init(void *opaque)
{
	const char *str;

	if (!riscv_isa_extension_available(NULL, h)) {
		kvm_info("hypervisor extension not available\n");
		return -ENODEV;
	}

	if (sbi_spec_is_0_1()) {
		kvm_info("require SBI v0.2 or higher\n");
		return -ENODEV;
	}

	if (sbi_probe_extension(SBI_EXT_RFENCE) <= 0) {
		kvm_info("require SBI RFENCE extension\n");
		return -ENODEV;
	}

	kvm_riscv_stage2_mode_detect();

	kvm_riscv_stage2_vmid_detect();

	kvm_info("hypervisor extension available\n");

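	/* Report the G-stage page-table format chosen by mode detection. */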
	switch (kvm_riscv_stage2_mode()) {
	case HGATP_MODE_SV32X4:
		str = "Sv32x4";
		break;
	case HGATP_MODE_SV39X4:
		str = "Sv39x4";
		break;
	case HGATP_MODE_SV48X4:
		str = "Sv48x4";
		break;
	default:
		return -ENODEV;
	}
	kvm_info("using %s G-stage page table format\n", str);

	kvm_info("VMID %ld bits available\n", kvm_riscv_stage2_vmid_bits());

	return 0;
}

void kvm_arch_exit(void)
{
}

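/* Module entry point: register this architecture with the KVM core. */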
static int riscv_kvm_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
module_init(riscv_kvm_init);