blob: 1bc0608a5bfd35afbba0d048d9e7fddec633a93b [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */
8
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_sbi.h>
16
17static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
18 unsigned long *out_val,
19 struct kvm_cpu_trap *utrap, bool *exit)
20{
21 int ret = 0;
22 struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
23 u64 next_cycle;
24
25 if (cp->a6 != SBI_EXT_TIME_SET_TIMER)
26 return -EINVAL;
27
28#if __riscv_xlen == 32
29 next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
30#else
31 next_cycle = (u64)cp->a0;
32#endif
33 kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
34
35 return ret;
36}
37
/*
 * Registration record for the SBI TIME extension: a single extension ID
 * (extid_start == extid_end == SBI_EXT_TIME) dispatched to
 * kvm_sbi_ext_time_handler.
 */
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
	.extid_start = SBI_EXT_TIME,
	.extid_end = SBI_EXT_TIME,
	.handler = kvm_sbi_ext_time_handler,
};
43
44static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
45 unsigned long *out_val,
46 struct kvm_cpu_trap *utrap, bool *exit)
47{
48 int ret = 0;
49 unsigned long i;
50 struct kvm_vcpu *tmp;
51 struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
52 unsigned long hmask = cp->a0;
53 unsigned long hbase = cp->a1;
54
55 if (cp->a6 != SBI_EXT_IPI_SEND_IPI)
56 return -EINVAL;
57
58 kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
59 if (hbase != -1UL) {
60 if (tmp->vcpu_id < hbase)
61 continue;
62 if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
63 continue;
64 }
65 ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
66 if (ret < 0)
67 break;
68 }
69
70 return ret;
71}
72
/*
 * Registration record for the SBI IPI extension: a single extension ID
 * (extid_start == extid_end == SBI_EXT_IPI) dispatched to
 * kvm_sbi_ext_ipi_handler.
 */
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = {
	.extid_start = SBI_EXT_IPI,
	.extid_end = SBI_EXT_IPI,
	.handler = kvm_sbi_ext_ipi_handler,
};
78
79static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
80 unsigned long *out_val,
81 struct kvm_cpu_trap *utrap, bool *exit)
82{
83 int ret = 0;
84 unsigned long i;
Atish Patra26fb7512022-01-20 01:09:18 -080085 struct cpumask cm;
Atish Patra5f862df2021-11-18 00:39:11 -080086 struct kvm_vcpu *tmp;
87 struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
88 unsigned long hmask = cp->a0;
89 unsigned long hbase = cp->a1;
90 unsigned long funcid = cp->a6;
91
92 cpumask_clear(&cm);
Atish Patra5f862df2021-11-18 00:39:11 -080093 kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
94 if (hbase != -1UL) {
95 if (tmp->vcpu_id < hbase)
96 continue;
97 if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
98 continue;
99 }
100 if (tmp->cpu < 0)
101 continue;
102 cpumask_set_cpu(tmp->cpu, &cm);
103 }
104
Atish Patra5f862df2021-11-18 00:39:11 -0800105 switch (funcid) {
106 case SBI_EXT_RFENCE_REMOTE_FENCE_I:
Atish Patra26fb7512022-01-20 01:09:18 -0800107 ret = sbi_remote_fence_i(&cm);
Atish Patra5f862df2021-11-18 00:39:11 -0800108 break;
109 case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
Atish Patra26fb7512022-01-20 01:09:18 -0800110 ret = sbi_remote_hfence_vvma(&cm, cp->a2, cp->a3);
Atish Patra5f862df2021-11-18 00:39:11 -0800111 break;
112 case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
Atish Patra26fb7512022-01-20 01:09:18 -0800113 ret = sbi_remote_hfence_vvma_asid(&cm, cp->a2,
Atish Patra5f862df2021-11-18 00:39:11 -0800114 cp->a3, cp->a4);
115 break;
116 case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
117 case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
118 case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
119 case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
120 /* TODO: implement for nested hypervisor case */
121 default:
122 ret = -EOPNOTSUPP;
123 }
124
125 return ret;
126}
127
/*
 * Registration record for the SBI RFENCE extension: a single extension ID
 * (extid_start == extid_end == SBI_EXT_RFENCE) dispatched to
 * kvm_sbi_ext_rfence_handler.
 */
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
	.extid_start = SBI_EXT_RFENCE,
	.extid_end = SBI_EXT_RFENCE,
	.handler = kvm_sbi_ext_rfence_handler,
};