// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/hwcap.h>

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

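/*
 * Base ISA extensions (A, C, D, F, I, M, S, U) that a guest VCPU is
 * allowed to see; anything outside this mask is filtered out of the
 * host ISA before it is exposed via vcpu->arch.isa.
 */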
#define KVM_RISCV_ISA_ALLOWED	(riscv_isa_extension_mask(a) | \
				 riscv_isa_extension_mask(c) | \
				 riscv_isa_extension_mask(d) | \
				 riscv_isa_extension_mask(f) | \
				 riscv_isa_extension_mask(i) | \
				 riscv_isa_extension_mask(m) | \
				 riscv_isa_extension_mask(s) | \
				 riscv_isa_extension_mask(u))

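/*
 * Restore the guest CSR and GPR state from the saved reset copies and
 * clear any software-tracked pending interrupts.
 */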
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;

	memcpy(csr, reset_csr, sizeof(*csr));

	memcpy(cntx, reset_cntx, sizeof(*cntx));

	WRITE_ONCE(vcpu->arch.irqs_pending, 0);
	WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

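/*
 * First-time VCPU setup: restrict the guest ISA to what the host
 * supports and KVM allows, program the reset values of the shadow
 * sstatus and hstatus CSRs (SPV/SPVP so sret enters virtualized
 * S-mode, VTW so guest WFI traps to the hypervisor), then apply the
 * reset state.
 */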
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cntx;

	/* Mark this VCPU never ran */
	vcpu->arch.ran_atleast_once = false;

	/* Setup ISA features available to VCPU */
	vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED;

	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
	cntx = &vcpu->arch.guest_reset_context;
	cntx->sstatus = SR_SPP | SR_SPIE;
	cntx->hstatus = 0;
	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;

	/* Reset VCPU */
	kvm_riscv_reset_vcpu(vcpu);

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Flush the pages pre-allocated for Stage2 page table mappings */
	kvm_riscv_stage2_flush_cache(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!vcpu->arch.power_off && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

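/*
 * ONE_REG accessors for the CONFIG register group. Only the "isa"
 * pseudo-register is implemented here; it reports the extension mask
 * kept in vcpu->arch.isa. Illustrative userspace encoding (assuming an
 * RV64 host, so KVM_REG_SIZE_U64 matches sizeof(unsigned long)):
 *
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *			KVM_REG_RISCV_CONFIG |
 *			KVM_REG_RISCV_CONFIG_REG(isa),
 *		.addr = (unsigned long)&isa_val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */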
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

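/*
 * Userspace may shrink (never grow beyond the host- and KVM-allowed
 * set) the guest ISA via the "isa" config register, but only before
 * the VCPU has run for the first time; afterwards the write fails
 * with -EOPNOTSUPP.
 */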
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		if (!vcpu->arch.ran_atleast_once) {
			vcpu->arch.isa = reg_val;
			vcpu->arch.isa &= riscv_isa_extension_base(NULL);
			vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
		} else {
			return -EOPNOTSUPP;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

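/*
 * ONE_REG accessors for the CORE register group: the guest GPRs, the
 * program counter (backed by the saved sepc), and a "mode" register
 * derived from sstatus.SPP (KVM_RISCV_MODE_S or KVM_RISCV_MODE_U).
 */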
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -EINVAL;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -EINVAL;

	return 0;
}

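/*
 * ONE_REG accessors for the CSR register group. The "sip" register is
 * special: reads synthesize it from the shadow hvip after the
 * software-pending interrupts have been flushed into it, and writes
 * are converted to hvip form and clear irqs_pending_mask.
 */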
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
	} else
		reg_val = ((unsigned long *)csr)[reg_num];

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	return 0;
}

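/*
 * Dispatch KVM_{SET,GET}_ONE_REG requests to the CONFIG, CORE, or CSR
 * handlers based on the register type field in reg->id.
 */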
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);

	return -EINVAL;
}

static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);

	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		if (irq.irq == KVM_INTERRUPT_SET)
			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
		else
			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}

	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
		else
			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
		break;
	}
	default:
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

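/*
 * Fold the software-tracked pending interrupt bits (irqs_pending,
 * guarded by irqs_pending_mask) into the shadow hvip value that is
 * written to the HVIP CSR before entering the guest.
 */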
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
		val = READ_ONCE(vcpu->arch.irqs_pending) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}
}

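/*
 * After returning from the guest, capture the VSIE CSR and propagate
 * any change the guest made to its software interrupt pending bit
 * (HVIP.VSSIP) back into the software-tracked state.
 */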
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long hvip;
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Read current HVIP and VSIE CSRs */
	csr->vsie = csr_read(CSR_VSIE);

	/* Sync-up HVIP.VSSIP bit changes done by the Guest */
	hvip = csr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, &v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
		}
	}
}

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	set_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	clear_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	return 0;
}

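/*
 * True if any interrupt in 'mask' is both pending in the software
 * state and enabled in the guest's shadow vsie.
 */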
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
{
	unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
			    << VSIP_TO_HVIP_SHIFT) & mask;

	return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
}

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		kvm_riscv_vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	/* TODO: To be implemented later. */
	return -EINVAL;
}

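/*
 * Context switch-in: restore the guest's VS-level CSRs from the shadow
 * copies, refresh hgatp for this physical CPU, and record the CPU the
 * VCPU is now loaded on.
 */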
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_VSSTATUS, csr->vsstatus);
	csr_write(CSR_VSIE, csr->vsie);
	csr_write(CSR_VSTVEC, csr->vstvec);
	csr_write(CSR_VSSCRATCH, csr->vsscratch);
	csr_write(CSR_VSEPC, csr->vsepc);
	csr_write(CSR_VSCAUSE, csr->vscause);
	csr_write(CSR_VSTVAL, csr->vstval);
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);

	kvm_riscv_stage2_update_hgatp(vcpu);

	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	csr_write(CSR_HGATP, 0);

	csr->vsstatus = csr_read(CSR_VSSTATUS);
	csr->vsie = csr_read(CSR_VSIE);
	csr->vstvec = csr_read(CSR_VSTVEC);
	csr->vsscratch = csr_read(CSR_VSSCRATCH);
	csr->vsepc = csr_read(CSR_VSEPC);
	csr->vscause = csr_read(CSR_VSCAUSE);
	csr->vstval = csr_read(CSR_VSTVAL);
	csr->hvip = csr_read(CSR_HVIP);
	csr->vsatp = csr_read(CSR_VSATP);
}

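/*
 * Handle pending VCPU requests: KVM_REQ_SLEEP blocks the VCPU on its
 * rcuwait until it is powered on and unpaused, and KVM_REQ_VCPU_RESET
 * re-applies the reset state.
 */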
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			rcuwait_wait_event(wait,
				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
				TASK_INTERRUPTIBLE);

			if (vcpu->arch.power_off || vcpu->arch.pause) {
				/*
				 * Awakened to handle a signal; request to
				 * sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu);
	}
}

static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_HVIP, csr->hvip);
}

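/*
 * Main KVM_RUN loop: complete any in-flight MMIO from userspace, then
 * repeatedly enter the guest with interrupts disabled, save the trap
 * CSRs (scause/stval/htval/htinst) on exit, and let
 * kvm_riscv_vcpu_exit() decide whether to keep running or return to
 * userspace.
 */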
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	/* Mark this VCPU ran at least once */
	vcpu->arch.ran_atleast_once = true;

	vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	/* Process MMIO value returned from user-space */
	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		if (ret) {
			srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
			return ret;
		}
	}

	if (run->immediate_exit) {
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
		return -EINTR;
	}

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Check conditions before entering the guest */
		cond_resched();

		kvm_riscv_check_vcpu_requests(vcpu);

		preempt_disable();

		local_irq_disable();

		/*
		 * Exit if we have a signal pending so that we can deliver
		 * the signal to user space.
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		vcpu->mode = IN_GUEST_MODE;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
		smp_mb__after_srcu_read_unlock();

		/*
		 * VCPU interrupts might have been updated asynchronously,
		 * so reflect the latest state in HW.
		 */
		kvm_riscv_vcpu_flush_interrupts(vcpu);

		/* Update HVIP CSR for current CPU */
		kvm_riscv_update_hvip(vcpu);

		if (ret <= 0 ||
		    kvm_request_pending(vcpu)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			preempt_enable();
			vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
			continue;
		}

		guest_enter_irqoff();

		__kvm_riscv_switch_to(&vcpu->arch);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		/*
		 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
		 * get an interrupt between __kvm_riscv_switch_to() and
		 * local_irq_enable() which can potentially change CSRs.
		 */
		trap.sepc = vcpu->arch.guest_context.sepc;
		trap.scause = csr_read(CSR_SCAUSE);
		trap.stval = csr_read(CSR_STVAL);
		trap.htval = csr_read(CSR_HTVAL);
		trap.htinst = csr_read(CSR_HTINST);

		/* Sync up the interrupt state with HW */
		kvm_riscv_vcpu_sync_interrupts(vcpu);

		/*
		 * We may have taken a host interrupt in VS/VU-mode (i.e.
		 * while executing the guest). This interrupt is still
		 * pending, as we haven't serviced it yet!
		 *
		 * We're now back in HS-mode with interrupts disabled
		 * so enabling the interrupts now will have the effect
		 * of taking the interrupt again, in HS-mode this time.
		 */
		local_irq_enable();

		/*
		 * We do local_irq_enable() before calling guest_exit() so
		 * that if a timer interrupt hits while running the guest
		 * we account that tick as being spent in the guest. We
		 * enable preemption after calling guest_exit() so that if
		 * we get preempted we make sure ticks after that are not
		 * counted as guest time.
		 */
		guest_exit();

		preempt_enable();

		vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);

	return ret;
}