// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/hwcap.h>

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

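/*
 * ISA extensions a Guest VCPU may use: only the single-letter base
 * extensions listed below are exposed; everything else is masked out.
 */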
#define KVM_RISCV_ISA_ALLOWED	(riscv_isa_extension_mask(a) | \
				 riscv_isa_extension_mask(c) | \
				 riscv_isa_extension_mask(d) | \
				 riscv_isa_extension_mask(f) | \
				 riscv_isa_extension_mask(i) | \
				 riscv_isa_extension_mask(m) | \
				 riscv_isa_extension_mask(s) | \
				 riscv_isa_extension_mask(u))

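/*
 * Restore the VCPU's CSRs and general-purpose context from the reset
 * copies saved at creation time. The vcpu_put()/vcpu_load() pair below
 * handles a reset requested while the VCPU is currently loaded, e.g.
 * when a stopped VCPU is restarted via the SBI HSM extension.
 */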
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
	bool loaded;

	/*
	 * Preemption must be disabled here because this races with
	 * kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
	 * which also call vcpu_put()/vcpu_load().
	 */
	get_cpu();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	memcpy(csr, reset_csr, sizeof(*csr));

	memcpy(cntx, reset_cntx, sizeof(*cntx));

	kvm_riscv_vcpu_fp_reset(vcpu);

	kvm_riscv_vcpu_timer_reset(vcpu);

	WRITE_ONCE(vcpu->arch.irqs_pending, 0);
	WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	/* Reset the guest CSRs for the hotplug use case */
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	put_cpu();
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

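/*
 * Initialize a newly created VCPU: the allowed ISA features, the reset
 * values of the shadow SSTATUS/HSTATUS CSRs and counter access, the
 * VCPU timer, and finally an initial reset of all VCPU state.
 */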
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cntx;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;

	/* Mark this VCPU as never having run */
	vcpu->arch.ran_atleast_once = false;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

	/* Setup ISA features available to VCPU */
	vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED;

	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
	cntx = &vcpu->arch.guest_reset_context;
	cntx->sstatus = SR_SPP | SR_SPIE;
	cntx->hstatus = 0;
	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;

	/* By default, make CY, TM, and IR counters accessible in VU mode */
	reset_csr->scounteren = 0x7;

	/* Setup VCPU timer */
	kvm_riscv_vcpu_timer_init(vcpu);

	/* Reset VCPU */
	kvm_riscv_reset_vcpu(vcpu);

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/*
	 * The VCPU with id 0 is the designated boot CPU.
	 * Keep all VCPUs with a non-zero id in the power-off state so
	 * that they can be brought up using the SBI HSM extension.
	 */
	if (vcpu->vcpu_idx != 0)
		kvm_riscv_vcpu_power_off(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Cleanup VCPU timer */
	kvm_riscv_vcpu_timer_deinit(vcpu);

	/* Free unused pages pre-allocated for Stage2 page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!vcpu->arch.power_off && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

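/*
 * Get/set handlers for the KVM_REG_RISCV_CONFIG pseudo-registers
 * accessed via the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. The only
 * config register so far is the Guest ISA bitmap, which user-space may
 * shrink but only before the VCPU has run for the first time.
 */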
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		if (!vcpu->arch.ran_atleast_once) {
			vcpu->arch.isa = reg_val;
			vcpu->arch.isa &= riscv_isa_extension_base(NULL);
			vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

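/*
 * Get/set handlers for the KVM_REG_RISCV_CORE registers: the Guest
 * program counter, the general-purpose registers, and the Guest
 * privilege mode.
 */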
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -EINVAL;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -EINVAL;

	return 0;
}

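/*
 * Get/set handlers for the KVM_REG_RISCV_CSR registers. The sip
 * pseudo-CSR is synthesized from the shadow HVIP: reads flush pending
 * interrupt updates into HVIP first, and writes clear the pending-bits
 * mask so the written value is not immediately overridden.
 */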
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
	} else
		reg_val = ((unsigned long *)csr)[reg_num];

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	return 0;
}

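/*
 * Dispatch KVM_SET_ONE_REG/KVM_GET_ONE_REG requests to the handler
 * matching the register type encoded in the register id.
 */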
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);

	return -EINVAL;
}

static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);

	return -EINVAL;
}

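/*
 * The only asynchronous VCPU ioctl is KVM_INTERRUPT, which raises or
 * clears the Guest external interrupt (IRQ_VS_EXT).
 */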
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		if (irq.irq == KVM_INTERRUPT_SET)
			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
		else
			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}

	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
		else
			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
		break;
	}
	default:
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

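/*
 * Fold asynchronously updated pending-interrupt bits into the shadow
 * HVIP CSR, which is written to hardware before every Guest entry.
 */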
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
		val = READ_ONCE(vcpu->arch.irqs_pending) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}
}

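/*
 * Capture interrupt state the Guest may have changed while running:
 * the VSIE CSR and the HVIP.VSSIP bit, which the Guest can update
 * directly.
 */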
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long hvip;
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Read current HVIP and VSIE CSRs */
	csr->vsie = csr_read(CSR_VSIE);

	/* Sync up the HVIP.VSSIP bit changes done by the Guest */
	hvip = csr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, &v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
		}
	}
}

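/*
 * Raise a Guest interrupt. The pending bit is published before the
 * corresponding mask bit so that kvm_riscv_vcpu_flush_interrupts()
 * never observes a mask bit without a valid pending value.
 */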
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	set_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	kvm_vcpu_kick(vcpu);

	return 0;
}

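/* Clear a Guest interrupt, with the same publication ordering as above */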
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	clear_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	return 0;
}

bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
{
	unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
			    << VSIP_TO_HVIP_SHIFT) & mask;

	return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
}

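/*
 * Power off a VCPU: request that it goes to sleep and kick it out of
 * the Guest. It stays off until kvm_riscv_vcpu_power_on() is called.
 */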
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		kvm_riscv_vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	/* TODO: To be implemented later. */
	return -EINVAL;
}

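/*
 * Load the shadow Guest CSR state into hardware and restore the
 * stage2 HGATP, timer, and FP context when the VCPU is scheduled in.
 */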
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_VSSTATUS, csr->vsstatus);
	csr_write(CSR_VSIE, csr->vsie);
	csr_write(CSR_VSTVEC, csr->vstvec);
	csr_write(CSR_VSSCRATCH, csr->vsscratch);
	csr_write(CSR_VSEPC, csr->vsepc);
	csr_write(CSR_VSCAUSE, csr->vscause);
	csr_write(CSR_VSTVAL, csr->vstval);
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);

	kvm_riscv_stage2_update_hgatp(vcpu);

	kvm_riscv_vcpu_timer_restore(vcpu);

	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);

	vcpu->cpu = cpu;
}

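/*
 * Save the Guest's hardware CSR state back into the shadow copies and
 * restore the host FP context when the VCPU is scheduled out.
 */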
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

	csr_write(CSR_HGATP, 0);

	csr->vsstatus = csr_read(CSR_VSSTATUS);
	csr->vsie = csr_read(CSR_VSIE);
	csr->vstvec = csr_read(CSR_VSTVEC);
	csr->vsscratch = csr_read(CSR_VSSCRATCH);
	csr->vsepc = csr_read(CSR_VSEPC);
	csr->vscause = csr_read(CSR_VSCAUSE);
	csr->vstval = csr_read(CSR_VSTVAL);
	csr->hvip = csr_read(CSR_HVIP);
	csr->vsatp = csr_read(CSR_VSATP);
}

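/*
 * Handle pending VCPU requests before entering the Guest: sleep while
 * powered off or paused, VCPU reset, stage2 HGATP update, and TLB
 * flush.
 */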
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			rcuwait_wait_event(wait,
				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
				TASK_INTERRUPTIBLE);

			if (vcpu->arch.power_off || vcpu->arch.pause) {
				/*
				 * Awakened to handle a signal; request
				 * sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_stage2_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			__kvm_riscv_hfence_gvma_all();
	}
}

static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_HVIP, csr->hvip);
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	guest_state_enter_irqoff();
	__kvm_riscv_switch_to(&vcpu->arch);
	guest_state_exit_irqoff();
}

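/*
 * The main VCPU run loop: complete any in-flight MMIO or SBI exit,
 * then repeatedly enter the Guest until user-space intervention is
 * needed or a signal is pending.
 */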
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	/* Mark that this VCPU has run at least once */
	vcpu->arch.ran_atleast_once = true;

	vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	/* Process MMIO value returned from user-space */
	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		if (ret) {
			srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
			return ret;
		}
	}

	/* Process SBI value returned from user-space */
	if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		if (ret) {
			srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
			return ret;
		}
	}

	if (run->immediate_exit) {
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
		return -EINTR;
	}

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Check conditions before entering the guest */
		cond_resched();

		kvm_riscv_stage2_vmid_update(vcpu);

		kvm_riscv_check_vcpu_requests(vcpu);

		preempt_disable();

		local_irq_disable();

		/*
		 * Exit if we have a signal pending so that we can deliver
		 * the signal to user space.
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		vcpu->mode = IN_GUEST_MODE;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
		smp_mb__after_srcu_read_unlock();

		/*
		 * The VCPU interrupts might have been updated
		 * asynchronously, so update them in hardware.
		 */
		kvm_riscv_vcpu_flush_interrupts(vcpu);

		/* Update HVIP CSR for current CPU */
		kvm_riscv_update_hvip(vcpu);

		if (ret <= 0 ||
		    kvm_riscv_stage2_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			preempt_enable();
			vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
			continue;
		}

		guest_timing_enter_irqoff();

		kvm_riscv_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		/*
		 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
		 * get an interrupt between __kvm_riscv_switch_to() and
		 * local_irq_enable(), which can potentially change CSRs.
		 */
		trap.sepc = vcpu->arch.guest_context.sepc;
		trap.scause = csr_read(CSR_SCAUSE);
		trap.stval = csr_read(CSR_STVAL);
		trap.htval = csr_read(CSR_HTVAL);
		trap.htinst = csr_read(CSR_HTINST);

		/* Sync up interrupt state with hardware */
		kvm_riscv_vcpu_sync_interrupts(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * There's no barrier which ensures that pending interrupts are
		 * recognised, so we just hope that the CPU takes any pending
		 * interrupts between the enable and disable.
		 */
		local_irq_enable();
		local_irq_disable();

		guest_timing_exit_irqoff();

		local_irq_enable();

		preempt_enable();

		vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);

	return ret;
}