/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"

#include "trace.h"

/*
 * Compute the return address, simulating the branch if required.
 * This function should only be called when the faulting instruction is in a
 * branch delay slot (i.e. CP0_Cause.BD is set).
 */
static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
				  unsigned long *out)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc;
	int err;

	if (epc & 3) {
		kvm_err("%s: unaligned epc\n", __func__);
		return -EINVAL;
	}

	/* Read the instruction */
	err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
	if (err)
		return err;

	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			fallthrough;
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		default:
			return -EINVAL;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		case bposge32_op:
			if (!cpu_has_dsp) {
				kvm_err("%s: DSP branch but not DSP ASE\n",
					__func__);
				return -EINVAL;
			}

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		default:
			return -EINVAL;
		}
		break;

		/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
		fallthrough;
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:	/* POP06 */
#ifndef CONFIG_CPU_MIPSR6
	case blezl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:	/* POP07 */
#ifndef CONFIG_CPU_MIPSR6
	case bgtzl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		return -EINVAL;

#ifdef CONFIG_CPU_MIPSR6
	/* R6 added the following compact branches with forbidden slots */
	case blezl_op:	/* POP26 */
	case bgtzl_op:	/* POP27 */
		/* only rt == 0 isn't compact branch */
		if (insn.i_format.rt != 0)
			goto compact_branch;
		return -EINVAL;
	case pop10_op:
	case pop30_op:
		/* only rs == rt == 0 is reserved, rest are compact branches */
		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
			goto compact_branch;
		return -EINVAL;
	case pop66_op:
	case pop76_op:
		/* only rs == 0 isn't compact branch */
		if (insn.i_format.rs != 0)
			goto compact_branch;
		return -EINVAL;
compact_branch:
		/*
		 * If we've hit an exception on the forbidden slot, then
		 * the branch must not have been taken.
		 */
		epc += 8;
		nextpc = epc;
		break;
#else
compact_branch:
		/* Fall through - Compact branches not supported before R6 */
#endif
	default:
		return -EINVAL;
	}

	*out = nextpc;
	return 0;
}
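
/*
 * Worked example of the taken-branch arithmetic above (illustrative values
 * only): a taken beq at epc 0x80001000 with simmediate = 3 resumes at
 * epc + 4 + (3 << 2) = 0x80001010, while the not-taken path resumes at
 * epc + 8 = 0x80001008, i.e. just past the delay slot.
 */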

enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
	int err;

	if (cause & CAUSEF_BD) {
		err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
					     &vcpu->arch.pc);
		if (err)
			return EMULATE_FAIL;
	} else {
		vcpu->arch.pc += 4;
	}

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return EMULATE_DONE;
}

/**
 * kvm_get_badinstr() - Get bad instruction encoding.
 * @opc:	Guest pointer to faulting instruction.
 * @vcpu:	KVM VCPU information.
 * @out:	Output for the instruction encoding.
 *
 * Gets the instruction encoding of the faulting instruction from the saved
 * BadInstr register value. CPUs without a BadInstr register, where guest
 * memory at @opc would have to be read instead, are not supported.
 *
 * Returns:	0 on success, with the encoding written to @out.
 *		-EINVAL if the CPU lacks a BadInstr register.
 */
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	if (cpu_has_badinstr) {
		*out = vcpu->arch.host_cp0_badinstr;
		return 0;
	} else {
		WARN_ONCE(1, "CPU doesn't have BadInstr register\n");
		return -EINVAL;
	}
}

/**
 * kvm_get_badinstrp() - Get bad prior instruction encoding.
 * @opc:	Guest pointer to prior faulting instruction.
 * @vcpu:	KVM VCPU information.
 * @out:	Output for the instruction encoding.
 *
 * Gets the instruction encoding of the prior faulting instruction (the branch
 * containing the delay slot which faulted) from the saved BadInstrP register
 * value. CPUs without a BadInstrP register, where guest memory at @opc would
 * have to be read instead, are not supported.
 *
 * Returns:	0 on success, with the encoding written to @out.
 *		-EINVAL if the CPU lacks a BadInstrP register.
 */
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	if (cpu_has_badinstrp) {
		*out = vcpu->arch.host_cp0_badinstrp;
		return 0;
	} else {
		WARN_ONCE(1, "CPU doesn't have BadInstrP register\n");
		return -EINVAL;
	}
}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta * count_hz will never overflow
	 * since at the boundary condition:
	 *   delta = count_period
	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
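
/*
 * Numerical sketch of the scaling above (illustrative frequency only): with
 * count_hz = 100 MHz, count_period = NSEC_PER_SEC * 2^32 / count_hz, roughly
 * 42.9 seconds. A biased delta of 10000 ns then scales to
 * div_u64(10000 * 100000000, NSEC_PER_SEC) = 1000 CP0_Count ticks, and since
 * delta < count_period the product stays below NSEC_PER_SEC * 2^32, well
 * inside the u64 range.
 */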

/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;

	return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now and handles if the
 * timer interrupt is pending and hasn't been handled yet.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t expires, threshold;
	u32 count, compare;
	int running;

	/* Calculate the biased and scaled guest CP0_Count */
	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
	compare = kvm_read_c0_guest_compare(cop0);

	/*
	 * Find whether CP0_Count has reached the closest timer interrupt. If
	 * not, we shouldn't inject it.
	 */
	if ((s32)(count - compare) < 0)
		return count;

	/*
	 * The CP0_Count we're going to return has already reached the closest
	 * timer interrupt. Quickly check if it really is a new interrupt by
	 * looking at whether the interval until the hrtimer expiry time is
	 * less than 1/4 of the timer period.
	 */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
	if (ktime_before(expires, threshold)) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	return count;
}

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
}

/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
	ktime_t now;

	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(u32)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}
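
/*
 * Example of the wrapping timeout calculation above (illustrative values
 * only): with count = 0xfffffff0 and compare = 0x00000010, the expression
 * (u64)(u32)(compare - count - 1) + 1 yields 0x20 ticks, correctly counting
 * through the 2^32 wrap; and when compare == count it yields a full 2^32
 * tick period rather than zero.
 */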

/**
 * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
 * @vcpu:	Virtual CPU.
 * @before:	Time before Count was saved, lower bound of drift calculation.
 * @count:	CP0_Count at point of restore.
 * @min_drift:	Minimum amount of drift permitted before correction.
 *		Must be <= 0.
 *
 * Restores the timer from a particular @count, accounting for drift. This can
 * be used in conjunction with kvm_mips_freeze_hrtimer() when a hardware timer
 * is to be used for a period of time, but the exact ktime corresponding to the
 * final Count that must be restored is not known.
 *
 * It is guaranteed that a timer interrupt immediately after restore will be
 * handled, but not if CP0_Compare is exactly at @count. That case should
 * already be handled when the hardware timer state is saved.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
 * stopped).
 *
 * Returns:	Amount of correction to count_bias due to drift.
 */
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift)
{
	ktime_t now, count_time;
	u32 now_count, before_count;
	u64 delta;
	int drift, ret = 0;

	/* Calculate expected count at before */
	before_count = vcpu->arch.count_bias +
			kvm_mips_ktime_to_count(vcpu, before);

	/*
	 * Detect significantly negative drift, where count is lower than
	 * expected. Some negative drift is expected when the hardware counter
	 * is set after kvm_mips_freeze_hrtimer(), and it is harmless to allow
	 * the time to jump forwards a little, within reason. If the drift is
	 * too significant, adjust the bias to avoid a big Guest.CP0_Count
	 * jump.
	 */
	drift = count - before_count;
	if (drift < min_drift) {
		count_time = before;
		vcpu->arch.count_bias += drift;
		ret = drift;
		goto resume;
	}

	/* Calculate expected count right now */
	now = ktime_get();
	now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);

	/*
	 * Detect positive drift, where count is higher than expected, and
	 * adjust the bias to avoid guest time going backwards.
	 */
	drift = count - now_count;
	if (drift > 0) {
		count_time = now;
		vcpu->arch.count_bias += drift;
		ret = drift;
		goto resume;
	}

	/* Subtract nanosecond delta to find ktime when count was read */
	delta = (u64)(u32)(now_count - count);
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	count_time = ktime_sub_ns(now, delta);

resume:
	/* Resume using the calculated ktime */
	kvm_mips_resume_hrtimer(vcpu, count_time, count);
	return ret;
}
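
/*
 * Drift handling sketch (illustrative numbers only): with min_drift = -16, a
 * saved count 100 ticks below the value expected at @before is treated as
 * significant negative drift, so the -100 is folded into count_bias and the
 * guest CP0_Count stays continuous. A smaller lag is instead absorbed by
 * back-computing the ktime at which the count was read, and any positive
 * drift is always folded into the bias so guest time never runs backwards.
 */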

/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of timer.
 *
 * Initialise the timer to the specified frequency, zero it, and set it going
 * if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
{
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Starting at 0 */
	kvm_mips_write_count(vcpu, 0);
}

/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	ktime_t now;
	u32 count;

	/* ensure the frequency is in a sensible range... */
	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
		return -EINVAL;
	/* ... and has actually changed */
	if (vcpu->arch.count_hz == count_hz)
		return 0;

	/* Safely freeze timer so we can keep it continuous */
	dc = kvm_mips_count_disabled(vcpu);
	if (dc) {
		now = kvm_mips_count_time(vcpu);
		count = kvm_read_c0_guest_count(cop0);
	} else {
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
	}

	/* Update the frequency */
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Calculate adjusted bias so dynamic count is unchanged */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	/* Update and resume hrtimer */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
	return 0;
}

/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 * @ack:	Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	u32 old_compare = kvm_read_c0_guest_compare(cop0);
	s32 delta = compare - old_compare;
	u32 cause;
	ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
	u32 count;

	/* if unchanged, must just be an ack */
	if (old_compare == compare) {
		if (!ack)
			return;
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
		kvm_write_c0_guest_compare(cop0, compare);
		return;
	}

	/*
	 * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
	 * too to prevent guest CP0_Count hitting guest CP0_Compare.
	 *
	 * The new GTOffset corresponds to the new value of CP0_Compare, and is
	 * set prior to it being written into the guest context. We disable
	 * preemption until the new value is written to prevent restore of a
	 * GTOffset corresponding to the old CP0_Compare value.
	 */
	if (delta > 0) {
		preempt_disable();
		write_c0_gtoffset(compare - read_c0_count());
		back_to_back_c0_hazard();
	}

	/* freeze_hrtimer() takes care of timer interrupts <= count */
	dc = kvm_mips_count_disabled(vcpu);
	if (!dc)
		now = kvm_mips_freeze_hrtimer(vcpu, &count);

	if (ack)
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
	else
		/*
		 * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
		 * preserve guest CP0_Cause.TI if we don't want to ack it.
		 */
		cause = kvm_read_c0_guest_cause(cop0);

	kvm_write_c0_guest_compare(cop0, compare);

	if (delta > 0)
		preempt_enable();

	back_to_back_c0_hazard();

	if (!ack && cause & CAUSEF_TI)
		kvm_write_c0_guest_cause(cop0, cause);

	/* resume_hrtimer() takes care of timer interrupts > count */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);

	/*
	 * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
	 * until after the new CP0_Compare is written, otherwise new guest
	 * CP0_Count could hit new guest CP0_Compare.
	 */
	if (delta <= 0)
		write_c0_gtoffset(compare - read_c0_count());
}
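
/*
 * Ordering summary for the GTOffset handling above: when CP0_Compare moves
 * forward (delta > 0) the new GTOffset is written first, so the guest's view
 * of CP0_Count can never reach the new CP0_Compare before it is in place;
 * when it moves backward (delta <= 0) the GTOffset update is deferred until
 * after CP0_Compare is written, for the same reason in the opposite
 * direction.
 */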

/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 count;
	ktime_t now;

	/* Stop hrtimer */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);

	return now;
}

/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		kvm_mips_count_disable(vcpu);
}

/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 count;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer if count_ctl.DC allows it.
	 * Otherwise it conveniently updates the biases.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
}

/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	u32 count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(u32)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}

/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
	return 0;
}

/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
	if (!vcpu->arch.pending_exceptions) {
		kvm_vz_lose_htimer(vcpu);
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return EMULATE_DONE;
}

enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_vcpu *vcpu)
{
	int r;
	enum emulation_result er;
	u32 rt;
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;
	unsigned int imme;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = inst.i_format.rt;

	run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
						vcpu->arch.host_cp0_badvaddr);
	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
		goto out_fail;

	switch (inst.i_format.opcode) {
#if defined(CONFIG_64BIT)
	case sd_op:
		run->mmio.len = 8;
		*(u64 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u64 *)data);
		break;
#endif

	case sw_op:
		run->mmio.len = 4;
		*(u32 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *)data);
		break;

	case sh_op:
		run->mmio.len = 2;
		*(u16 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u16 *)data);
		break;

	case sb_op:
		run->mmio.len = 1;
		*(u8 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u8 *)data);
		break;

	case swl_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x3);
		run->mmio.len = 4;
		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
		switch (imme) {
		case 0:
			*(u32 *)data = ((*(u32 *)data) & 0xffffff00) |
					(vcpu->arch.gprs[rt] >> 24);
			break;
		case 1:
			*(u32 *)data = ((*(u32 *)data) & 0xffff0000) |
					(vcpu->arch.gprs[rt] >> 16);
			break;
		case 2:
			*(u32 *)data = ((*(u32 *)data) & 0xff000000) |
					(vcpu->arch.gprs[rt] >> 8);
			break;
		case 3:
			*(u32 *)data = vcpu->arch.gprs[rt];
			break;
		default:
			break;
		}

		kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *)data);
		break;

	case swr_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x3);
		run->mmio.len = 4;
		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
		switch (imme) {
		case 0:
			*(u32 *)data = vcpu->arch.gprs[rt];
			break;
		case 1:
			*(u32 *)data = ((*(u32 *)data) & 0xff) |
					(vcpu->arch.gprs[rt] << 8);
			break;
		case 2:
			*(u32 *)data = ((*(u32 *)data) & 0xffff) |
					(vcpu->arch.gprs[rt] << 16);
			break;
		case 3:
			*(u32 *)data = ((*(u32 *)data) & 0xffffff) |
					(vcpu->arch.gprs[rt] << 24);
			break;
		default:
			break;
		}

		kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *)data);
		break;
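
		/*
		 * Lane sketch for the unaligned swl/swr cases above (the masks
		 * and shifts match a little-endian byte layout): swl with
		 * badvaddr & 0x3 == 1 stores the two most significant bytes of
		 * gprs[rt] into the low half of the aligned word, e.g.
		 * gprs[rt] = 0xAABBCCDD gives
		 * *data = (*data & 0xffff0000) | 0xAABB, while swr at the same
		 * offset shifts gprs[rt] left by 8 to fill the upper three
		 * bytes.
		 */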

#if defined(CONFIG_64BIT)
	case sdl_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x7);

		run->mmio.len = 8;
		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
		switch (imme) {
		case 0:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) |
					((vcpu->arch.gprs[rt] >> 56) & 0xff);
			break;
		case 1:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) |
					((vcpu->arch.gprs[rt] >> 48) & 0xffff);
			break;
		case 2:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) |
					((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
			break;
		case 3:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) |
					((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
			break;
		case 4:
			*(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) |
					((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
			break;
		case 5:
			*(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) |
					((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
			break;
		case 6:
			*(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) |
					((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
			break;
		case 7:
			*(u64 *)data = vcpu->arch.gprs[rt];
			break;
		default:
			break;
		}

		kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u64 *)data);
		break;

	case sdr_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x7);

		run->mmio.len = 8;
		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
		switch (imme) {
		case 0:
			*(u64 *)data = vcpu->arch.gprs[rt];
			break;
		case 1:
			*(u64 *)data = ((*(u64 *)data) & 0xff) |
					(vcpu->arch.gprs[rt] << 8);
			break;
		case 2:
			*(u64 *)data = ((*(u64 *)data) & 0xffff) |
					(vcpu->arch.gprs[rt] << 16);
			break;
		case 3:
			*(u64 *)data = ((*(u64 *)data) & 0xffffff) |
					(vcpu->arch.gprs[rt] << 24);
			break;
		case 4:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffff) |
					(vcpu->arch.gprs[rt] << 32);
			break;
		case 5:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffff) |
					(vcpu->arch.gprs[rt] << 40);
			break;
		case 6:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) |
					(vcpu->arch.gprs[rt] << 48);
			break;
		case 7:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) |
					(vcpu->arch.gprs[rt] << 56);
			break;
		default:
			break;
		}

		kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u64 *)data);
		break;
#endif

#ifdef CONFIG_CPU_LOONGSON64
	case sdc2_op:
		rt = inst.loongson3_lsdc2_format.rt;
		switch (inst.loongson3_lsdc2_format.opcode1) {
		/*
		 * Loongson-3 overridden sdc2 instructions.
		 * opcode1		instruction
		 *   0x0		gssbx: store 1 byte from GPR
		 *   0x1		gsshx: store 2 bytes from GPR
		 *   0x2		gsswx: store 4 bytes from GPR
		 *   0x3		gssdx: store 8 bytes from GPR
		 */
		case 0x0:
			run->mmio.len = 1;
			*(u8 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u8 *)data);
			break;
		case 0x1:
			run->mmio.len = 2;
			*(u16 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u16 *)data);
			break;
		case 0x2:
			run->mmio.len = 4;
			*(u32 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u32 *)data);
			break;
		case 0x3:
			run->mmio.len = 8;
			*(u64 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u64 *)data);
			break;
		default:
			kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n",
				inst.word);
			break;
		}
		break;
#endif

	default:
		kvm_err("Store not yet supported (inst=0x%08x)\n",
			inst.word);
		goto out_fail;
	}

	vcpu->mmio_needed = 1;
	run->mmio.is_write = 1;
	vcpu->mmio_is_write = 1;

	r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			run->mmio.phys_addr, run->mmio.len, data);

	if (!r) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;

out_fail:
	/* Rollback PC if emulation was unsuccessful */
	vcpu->arch.pc = curr_pc;
	return EMULATE_FAIL;
}

enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause, struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int r;
	enum emulation_result er;
	unsigned long curr_pc;
	u32 op, rt;
	unsigned int imme;

	rt = inst.i_format.rt;
	op = inst.i_format.opcode;

	/*
	 * Find the resume PC now while we have safe and easy access to the
	 * prior branch instruction, and save it for
	 * kvm_mips_complete_mmio_load() to restore later.
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;
	vcpu->arch.io_pc = vcpu->arch.pc;
	vcpu->arch.pc = curr_pc;

	vcpu->arch.io_gpr = rt;

	run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
						vcpu->arch.host_cp0_badvaddr);
	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
		return EMULATE_FAIL;

	vcpu->mmio_needed = 2;	/* signed */
	switch (op) {
#if defined(CONFIG_64BIT)
	case ld_op:
		run->mmio.len = 8;
		break;

	case lwu_op:
		vcpu->mmio_needed = 1;	/* unsigned */
		fallthrough;
#endif
	case lw_op:
		run->mmio.len = 4;
		break;

	case lhu_op:
		vcpu->mmio_needed = 1;	/* unsigned */
		fallthrough;
	case lh_op:
		run->mmio.len = 2;
		break;

	case lbu_op:
		vcpu->mmio_needed = 1;	/* unsigned */
		fallthrough;
	case lb_op:
		run->mmio.len = 1;
		break;

	case lwl_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x3);

		run->mmio.len = 4;
		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 3;	/* 1 byte */
			break;
		case 1:
			vcpu->mmio_needed = 4;	/* 2 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 5;	/* 3 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 6;	/* 4 bytes */
			break;
		default:
			break;
		}
		break;

	case lwr_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x3);

		run->mmio.len = 4;
		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 7;	/* 4 bytes */
			break;
		case 1:
			vcpu->mmio_needed = 8;	/* 3 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 9;	/* 2 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 10;	/* 1 byte */
			break;
		default:
			break;
		}
		break;

#if defined(CONFIG_64BIT)
	case ldl_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x7);

		run->mmio.len = 8;
		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 11;	/* 1 byte */
			break;
		case 1:
			vcpu->mmio_needed = 12;	/* 2 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 13;	/* 3 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 14;	/* 4 bytes */
			break;
		case 4:
			vcpu->mmio_needed = 15;	/* 5 bytes */
			break;
		case 5:
			vcpu->mmio_needed = 16;	/* 6 bytes */
			break;
		case 6:
			vcpu->mmio_needed = 17;	/* 7 bytes */
			break;
		case 7:
			vcpu->mmio_needed = 18;	/* 8 bytes */
			break;
		default:
			break;
		}
		break;

	case ldr_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x7);

		run->mmio.len = 8;
		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 19;	/* 8 bytes */
			break;
		case 1:
			vcpu->mmio_needed = 20;	/* 7 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 21;	/* 6 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 22;	/* 5 bytes */
			break;
		case 4:
			vcpu->mmio_needed = 23;	/* 4 bytes */
			break;
		case 5:
			vcpu->mmio_needed = 24;	/* 3 bytes */
			break;
		case 6:
			vcpu->mmio_needed = 25;	/* 2 bytes */
			break;
		case 7:
			vcpu->mmio_needed = 26;	/* 1 byte */
			break;
		default:
			break;
		}
		break;
#endif

#ifdef CONFIG_CPU_LOONGSON64
	case ldc2_op:
		rt = inst.loongson3_lsdc2_format.rt;
		switch (inst.loongson3_lsdc2_format.opcode1) {
		/*
		 * Loongson-3 overridden ldc2 instructions.
		 * opcode1		instruction
		 *   0x0		gslbx: load 1 byte into GPR
		 *   0x1		gslhx: load 2 bytes into GPR
		 *   0x2		gslwx: load 4 bytes into GPR
		 *   0x3		gsldx: load 8 bytes into GPR
		 */
		case 0x0:
			run->mmio.len = 1;
			vcpu->mmio_needed = 27;	/* signed */
			break;
		case 0x1:
			run->mmio.len = 2;
			vcpu->mmio_needed = 28;	/* signed */
			break;
		case 0x2:
			run->mmio.len = 4;
			vcpu->mmio_needed = 29;	/* signed */
			break;
		case 0x3:
			run->mmio.len = 8;
			vcpu->mmio_needed = 30;	/* signed */
			break;
		default:
			kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n",
				inst.word);
			break;
		}
		break;
#endif

	default:
		kvm_err("Load not yet supported (inst=0x%08x)\n",
			inst.word);
		vcpu->mmio_needed = 0;
		return EMULATE_FAIL;
	}

	run->mmio.is_write = 0;
	vcpu->mmio_is_write = 0;

	r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
			run->mmio.phys_addr, run->mmio.len, run->mmio.data);

	if (!r) {
		kvm_mips_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	if (run->mmio.len > sizeof(*gpr)) {
		kvm_err("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	/* Restore saved resume PC */
	vcpu->arch.pc = vcpu->arch.io_pc;

	switch (run->mmio.len) {
	case 8:
		switch (vcpu->mmio_needed) {
		case 11:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
				(((*(s64 *)run->mmio.data) & 0xff) << 56);
			break;
		case 12:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
				(((*(s64 *)run->mmio.data) & 0xffff) << 48);
			break;
		case 13:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
				(((*(s64 *)run->mmio.data) & 0xffffff) << 40);
			break;
		case 14:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
				(((*(s64 *)run->mmio.data) & 0xffffffff) << 32);
			break;
		case 15:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
				(((*(s64 *)run->mmio.data) & 0xffffffffff) << 24);
			break;
		case 16:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
				(((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16);
			break;
		case 17:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
				(((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8);
			break;
		case 18:
		case 19:
			*gpr = *(s64 *)run->mmio.data;
			break;
		case 20:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
				((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff);
			break;
		case 21:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
				((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff);
			break;
		case 22:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
				((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff);
			break;
		case 23:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
				((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff);
			break;
		case 24:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
				((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff);
			break;
		case 25:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
				((((*(s64 *)run->mmio.data)) >> 48) & 0xffff);
			break;
		case 26:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
				((((*(s64 *)run->mmio.data)) >> 56) & 0xff);
			break;
		default:
			*gpr = *(s64 *)run->mmio.data;
		}
		break;

	case 4:
		switch (vcpu->mmio_needed) {
		case 1:
			*gpr = *(u32 *)run->mmio.data;
			break;
		case 2:
			*gpr = *(s32 *)run->mmio.data;
			break;
		case 3:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
				(((*(s32 *)run->mmio.data) & 0xff) << 24);
			break;
		case 4:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
				(((*(s32 *)run->mmio.data) & 0xffff) << 16);
			break;
		case 5:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
				(((*(s32 *)run->mmio.data) & 0xffffff) << 8);
			break;
		case 6:
		case 7:
			*gpr = *(s32 *)run->mmio.data;
			break;
		case 8:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
				((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
			break;
		case 9:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
				((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
			break;
		case 10:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
				((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
			break;
		default:
			*gpr = *(s32 *)run->mmio.data;
		}
		break;

	case 2:
		if (vcpu->mmio_needed == 1)
			*gpr = *(u16 *)run->mmio.data;
		else
			*gpr = *(s16 *)run->mmio.data;

		break;
	case 1:
		if (vcpu->mmio_needed == 1)
			*gpr = *(u8 *)run->mmio.data;
		else
			*gpr = *(s8 *)run->mmio.data;
		break;
	}

done:
	return er;
}