blob: 80a681f42bf509ab059781555f920cc110f19820 [file] [log] [blame]
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
Sanjay Lalf5c236d2012-11-21 18:34:09 -080011
12#include <linux/errno.h>
13#include <linux/err.h>
Sanjay Lalf5c236d2012-11-21 18:34:09 -080014#include <linux/kvm_host.h>
James Hogandacc3ed2016-08-19 15:27:22 +010015#include <linux/uaccess.h>
James Hogan1581ff32016-11-16 23:48:56 +000016#include <linux/vmalloc.h>
17#include <asm/mmu_context.h>
James Hoganf7f14272016-09-08 22:57:03 +010018#include <asm/pgalloc.h>
Sanjay Lalf5c236d2012-11-21 18:34:09 -080019
Deng-Cheng Zhud7d5b052014-06-26 12:11:38 -070020#include "interrupt.h"
Sanjay Lalf5c236d2012-11-21 18:34:09 -080021
22static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
23{
24 gpa_t gpa;
James Hogan8cffd192016-06-09 14:19:08 +010025 gva_t kseg = KSEGX(gva);
James Hoganb8f79dd2015-05-11 23:31:45 +010026 gva_t gkseg = KVM_GUEST_KSEGX(gva);
Sanjay Lalf5c236d2012-11-21 18:34:09 -080027
28 if ((kseg == CKSEG0) || (kseg == CKSEG1))
29 gpa = CPHYSADDR(gva);
James Hoganb8f79dd2015-05-11 23:31:45 +010030 else if (gkseg == KVM_GUEST_KSEG0)
31 gpa = KVM_GUEST_CPHYSADDR(gva);
Sanjay Lalf5c236d2012-11-21 18:34:09 -080032 else {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -070033 kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
Sanjay Lalf5c236d2012-11-21 18:34:09 -080034 kvm_mips_dump_host_tlbs();
35 gpa = KVM_INVALID_ADDR;
36 }
37
Sanjay Lalf5c236d2012-11-21 18:34:09 -080038 kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
Sanjay Lalf5c236d2012-11-21 18:34:09 -080039
40 return gpa;
41}
42
Sanjay Lalf5c236d2012-11-21 18:34:09 -080043static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
44{
James Hogan1c0cd662015-02-06 10:56:27 +000045 struct mips_coproc *cop0 = vcpu->arch.cop0;
Sanjay Lalf5c236d2012-11-21 18:34:09 -080046 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +010047 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +010048 u32 cause = vcpu->arch.host_cp0_cause;
Sanjay Lalf5c236d2012-11-21 18:34:09 -080049 enum emulation_result er = EMULATE_DONE;
50 int ret = RESUME_GUEST;
51
James Hogan1c0cd662015-02-06 10:56:27 +000052 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
53 /* FPU Unusable */
54 if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
55 (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
56 /*
57 * Unusable/no FPU in guest:
58 * deliver guest COP1 Unusable Exception
59 */
60 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
61 } else {
62 /* Restore FPU state */
63 kvm_own_fpu(vcpu);
64 er = EMULATE_DONE;
65 }
66 } else {
Sanjay Lalf5c236d2012-11-21 18:34:09 -080067 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
James Hogan1c0cd662015-02-06 10:56:27 +000068 }
Sanjay Lalf5c236d2012-11-21 18:34:09 -080069
70 switch (er) {
71 case EMULATE_DONE:
72 ret = RESUME_GUEST;
73 break;
74
75 case EMULATE_FAIL:
76 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
77 ret = RESUME_HOST;
78 break;
79
80 case EMULATE_WAIT:
81 run->exit_reason = KVM_EXIT_INTR;
82 ret = RESUME_HOST;
83 break;
84
85 default:
86 BUG();
87 }
88 return ret;
89}
90
James Hogan420ea092016-12-06 19:27:18 +000091static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
92 struct kvm_vcpu *vcpu)
93{
94 enum emulation_result er;
95 union mips_instruction inst;
96 int err;
97
98 /* A code fetch fault doesn't count as an MMIO */
99 if (kvm_is_ifetch_fault(&vcpu->arch)) {
100 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
101 return RESUME_HOST;
102 }
103
104 /* Fetch the instruction. */
105 if (cause & CAUSEF_BD)
106 opc += 1;
107 err = kvm_get_badinstr(opc, vcpu, &inst.word);
108 if (err) {
109 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
110 return RESUME_HOST;
111 }
112
113 /* Emulate the load */
114 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
115 if (er == EMULATE_FAIL) {
116 kvm_err("Emulate load from MMIO space failed\n");
117 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
118 } else {
119 run->exit_reason = KVM_EXIT_MMIO;
120 }
121 return RESUME_HOST;
122}
123
124static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
125 struct kvm_vcpu *vcpu)
126{
127 enum emulation_result er;
128 union mips_instruction inst;
129 int err;
130
131 /* Fetch the instruction. */
132 if (cause & CAUSEF_BD)
133 opc += 1;
134 err = kvm_get_badinstr(opc, vcpu, &inst.word);
135 if (err) {
136 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
137 return RESUME_HOST;
138 }
139
140 /* Emulate the store */
141 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
142 if (er == EMULATE_FAIL) {
143 kvm_err("Emulate store to MMIO space failed\n");
144 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
145 } else {
146 run->exit_reason = KVM_EXIT_MMIO;
147 }
148 return RESUME_HOST;
149}
150
151static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
152 struct kvm_vcpu *vcpu, bool store)
153{
154 if (store)
155 return kvm_mips_bad_store(cause, opc, run, vcpu);
156 else
157 return kvm_mips_bad_load(cause, opc, run, vcpu);
158}
159
/*
 * Handle a TLB Modified exception (write to a page whose host mapping is
 * not dirty/writable).
 *
 * For guest mapped segments the guest TLB is consulted first: a missing or
 * invalid entry indicates a stale host TLB (internal error), a clean entry
 * means the exception is relayed to the guest, and a dirty entry is
 * re-faulted into the host TLB. Guest KSEG0 faults are handled directly.
 * Anything else (host kernel addresses) is treated as MMIO.
 */
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	struct kvm_mips_tlb *tlb;
	unsigned long entryhi;
	int index;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		/*
		 * First find the mapping in the guest TLB. If the failure to
		 * write was due to the guest TLB, it should be up to the guest
		 * to handle it.
		 */
		entryhi = (badvaddr & VPN2_MASK) |
			  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
		index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

		/*
		 * These should never happen.
		 * They would indicate stale host TLB entries.
		 */
		if (unlikely(index < 0)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		tlb = vcpu->arch.guest_tlb + index;
		if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/*
		 * Guest entry not dirty? That would explain the TLB modified
		 * exception. Relay that on to the guest so it can handle it.
		 */
		if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
			kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
			return RESUME_GUEST;
		}

		/* Dirty in guest TLB: refault the write into the host TLB. */
		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
							 true))
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, run, vcpu);
		return RESUME_GUEST;
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, run, vcpu);
		return RESUME_GUEST;
	} else {
		/* host kernel addresses are all handled as MMIO */
		return kvm_mips_bad_store(cause, opc, run, vcpu);
	}
}
220
/*
 * Common handler for guest TLB load/store miss exceptions.
 *
 * Dispatches on the faulting address: the commpage (kernel mode only),
 * guest mapped segments, guest KSEG0, host KSEG0/KSEG1 MMIO (kernel
 * mode, e.g. under EVA), or an illegal address which is reported as an
 * internal error.
 *
 * @store: true for a store (TLBS) miss, false for a load (TLBL) miss.
 */
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		/* Fault on the commpage: map it into the host TLB. */
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  store ? "ST" : "LD", cause, opc, badvaddr);

		/*
		 * User Address (UA) fault, this could happen if
		 * (1) TLB entry not present/valid in both Guest and shadow host
		 *     TLBs, in this case we pass on the fault to the guest
		 *     kernel and let it handle it.
		 * (2) TLB entry is present in the Guest TLB but not in the
		 *     shadow, in this case we inject the TLB from the Guest TLB
		 *     into the shadow host TLB
		 */

		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
			ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		/*
		 * With EVA we may get a TLB exception instead of an address
		 * error when the guest performs MMIO to KSeg1 addresses.
		 */
		ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
	} else {
		kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
			store ? "ST" : "LD", cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
282
/* TLB store-miss exit handler: delegate to the common TLB miss path. */
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}
287
/* TLB load-miss exit handler: delegate to the common TLB miss path. */
static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}
292
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800293static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
294{
295 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100296 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800297 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
James Hogan31cf7492016-06-09 14:19:09 +0100298 u32 cause = vcpu->arch.host_cp0_cause;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800299 int ret = RESUME_GUEST;
300
301 if (KVM_GUEST_KERNEL_MODE(vcpu)
302 && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
James Hogan420ea092016-12-06 19:27:18 +0000303 ret = kvm_mips_bad_store(cause, opc, run, vcpu);
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800304 } else {
James Hogan31cf7492016-06-09 14:19:09 +0100305 kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -0700306 cause, opc, badvaddr);
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800307 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
308 ret = RESUME_HOST;
309 }
310 return ret;
311}
312
313static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
314{
315 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100316 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800317 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
James Hogan31cf7492016-06-09 14:19:09 +0100318 u32 cause = vcpu->arch.host_cp0_cause;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800319 int ret = RESUME_GUEST;
320
321 if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
James Hogan420ea092016-12-06 19:27:18 +0000322 ret = kvm_mips_bad_load(cause, opc, run, vcpu);
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800323 } else {
James Hogan31cf7492016-06-09 14:19:09 +0100324 kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -0700325 cause, opc, badvaddr);
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800326 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
327 ret = RESUME_HOST;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800328 }
329 return ret;
330}
331
332static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
333{
334 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100335 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +0100336 u32 cause = vcpu->arch.host_cp0_cause;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800337 enum emulation_result er = EMULATE_DONE;
338 int ret = RESUME_GUEST;
339
340 er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
341 if (er == EMULATE_DONE)
342 ret = RESUME_GUEST;
343 else {
344 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
345 ret = RESUME_HOST;
346 }
347 return ret;
348}
349
350static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
351{
352 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100353 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +0100354 u32 cause = vcpu->arch.host_cp0_cause;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800355 enum emulation_result er = EMULATE_DONE;
356 int ret = RESUME_GUEST;
357
358 er = kvm_mips_handle_ri(cause, opc, run, vcpu);
359 if (er == EMULATE_DONE)
360 ret = RESUME_GUEST;
361 else {
362 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
363 ret = RESUME_HOST;
364 }
365 return ret;
366}
367
368static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
369{
370 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100371 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +0100372 u32 cause = vcpu->arch.host_cp0_cause;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800373 enum emulation_result er = EMULATE_DONE;
374 int ret = RESUME_GUEST;
375
376 er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
377 if (er == EMULATE_DONE)
378 ret = RESUME_GUEST;
379 else {
380 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
381 ret = RESUME_HOST;
382 }
383 return ret;
384}
385
James Hogan0a560422015-02-06 16:03:57 +0000386static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
387{
388 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100389 u32 __user *opc = (u32 __user *)vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +0100390 u32 cause = vcpu->arch.host_cp0_cause;
James Hogan0a560422015-02-06 16:03:57 +0000391 enum emulation_result er = EMULATE_DONE;
392 int ret = RESUME_GUEST;
393
394 er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
395 if (er == EMULATE_DONE) {
396 ret = RESUME_GUEST;
397 } else {
398 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
399 ret = RESUME_HOST;
400 }
401 return ret;
402}
403
James Hoganc2537ed2015-02-06 10:56:27 +0000404static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
405{
406 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100407 u32 __user *opc = (u32 __user *)vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +0100408 u32 cause = vcpu->arch.host_cp0_cause;
James Hoganc2537ed2015-02-06 10:56:27 +0000409 enum emulation_result er = EMULATE_DONE;
410 int ret = RESUME_GUEST;
411
412 er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
413 if (er == EMULATE_DONE) {
414 ret = RESUME_GUEST;
415 } else {
416 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
417 ret = RESUME_HOST;
418 }
419 return ret;
420}
421
James Hogan1c0cd662015-02-06 10:56:27 +0000422static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
423{
424 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100425 u32 __user *opc = (u32 __user *)vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +0100426 u32 cause = vcpu->arch.host_cp0_cause;
James Hogan1c0cd662015-02-06 10:56:27 +0000427 enum emulation_result er = EMULATE_DONE;
428 int ret = RESUME_GUEST;
429
430 er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
431 if (er == EMULATE_DONE) {
432 ret = RESUME_GUEST;
433 } else {
434 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
435 ret = RESUME_HOST;
436 }
437 return ret;
438}
439
James Hoganc2537ed2015-02-06 10:56:27 +0000440/**
441 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
442 * @vcpu: Virtual CPU context.
443 *
444 * Handle when the guest attempts to use MSA when it is disabled.
445 */
James Hogan98119ad2015-02-06 11:11:56 +0000446static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
447{
James Hoganc2537ed2015-02-06 10:56:27 +0000448 struct mips_coproc *cop0 = vcpu->arch.cop0;
James Hogan98119ad2015-02-06 11:11:56 +0000449 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100450 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +0100451 u32 cause = vcpu->arch.host_cp0_cause;
James Hogan98119ad2015-02-06 11:11:56 +0000452 enum emulation_result er = EMULATE_DONE;
453 int ret = RESUME_GUEST;
454
James Hoganc2537ed2015-02-06 10:56:27 +0000455 if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
456 (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
457 /*
458 * No MSA in guest, or FPU enabled and not in FR=1 mode,
459 * guest reserved instruction exception
460 */
461 er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
462 } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
463 /* MSA disabled by guest, guest MSA disabled exception */
464 er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
465 } else {
466 /* Restore MSA/FPU state */
467 kvm_own_msa(vcpu);
468 er = EMULATE_DONE;
469 }
James Hogan98119ad2015-02-06 11:11:56 +0000470
471 switch (er) {
472 case EMULATE_DONE:
473 ret = RESUME_GUEST;
474 break;
475
476 case EMULATE_FAIL:
477 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
478 ret = RESUME_HOST;
479 break;
480
481 default:
482 BUG();
483 }
484 return ret;
485}
486
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800487static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
488{
James Hoganf7f14272016-09-08 22:57:03 +0100489 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
490 struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
491
James Hoganf7f14272016-09-08 22:57:03 +0100492 /*
493 * Allocate GVA -> HPA page tables.
494 * MIPS doesn't use the mm_struct pointer argument.
495 */
496 kern_mm->pgd = pgd_alloc(kern_mm);
497 if (!kern_mm->pgd)
498 return -ENOMEM;
499
500 user_mm->pgd = pgd_alloc(user_mm);
501 if (!user_mm->pgd) {
502 pgd_free(kern_mm, kern_mm->pgd);
503 return -ENOMEM;
504 }
505
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800506 return 0;
507}
508
/*
 * Free a GVA page table allocated by kvm_trap_emul_vcpu_init().
 *
 * Walks the pgd/pud/pmd levels over the user address range and frees each
 * allocated table, then the pgd itself. Entries at or above 0x80000000 are
 * host kernel page tables copied from init_mm.pgd and must not be freed,
 * hence the 'end' cutoff at every level.
 */
static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
	/* Don't free host kernel page tables copied from init_mm.pgd */
	const unsigned long end = 0x80000000;
	unsigned long pgd_va, pud_va, pmd_va;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;

	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
		if (pgd_none(pgd[i]))
			continue;

		pgd_va = (unsigned long)i << PGDIR_SHIFT;
		if (pgd_va >= end)
			break;
		pud = pud_offset(pgd + i, 0);
		for (j = 0; j < PTRS_PER_PUD; j++) {
			if (pud_none(pud[j]))
				continue;

			pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
			if (pud_va >= end)
				break;
			pmd = pmd_offset(pud + j, 0);
			for (k = 0; k < PTRS_PER_PMD; k++) {
				if (pmd_none(pmd[k]))
					continue;

				pmd_va = pud_va | (k << PMD_SHIFT);
				if (pmd_va >= end)
					break;
				/* Free the leaf PTE page for this pmd entry. */
				pte = pte_offset(pmd + k, 0);
				pte_free_kernel(NULL, pte);
			}
			pmd_free(NULL, pmd);
		}
		pud_free(NULL, pud);
	}
	pgd_free(NULL, pgd);
}
551
/* Free the GVA page tables allocated in kvm_trap_emul_vcpu_init(). */
static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}
557
/*
 * Initialise the guest's CP0 state (PRId, Config0-7, IntCtl, EBase) so the
 * virtual CPU comes up looking like a plausible MIPS core. Always returns 0.
 */
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected
	 */
#ifndef CONFIG_CPU_MIPSR6
	/* r2-r5, simulate a MIPS 24kc */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
	/* r6+, simulate a generic QEMU machine */
	kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
	/*
	 * Config0: have config1, cacheable noncoherent write-back write
	 * allocate cache policy, standard TLB MMU. Endianness, arch revision
	 * & virtually tagged icache are taken from the host.
	 */
	config = read_c0_config() & MIPS_CONF_AR;
	config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
	config |= CONF_BE;
#endif
	if (cpu_has_vtag_icache)
		config |= MIPS_CONF_VI;
	kvm_write_c0_guest_config(cop0, config);

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size (MMUSize-1 field, bits 30:25) */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
		     MIPS_CONF1_WR | MIPS_CONF1_CA);
	kvm_write_c0_guest_config1(cop0, config1);

	/* Config2: only the M bit (have config3), no L2/L3 cache fields */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Config3: have config4, UserLocal register implemented */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Config4: only the M bit (have config5) */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* Config5: all zero (no config6) */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
				       (vcpu_id & MIPS_EBASE_CPUNUM));

	return 0;
}
628
/* Drop all shadow (GVA) mappings for every VCPU in the VM. */
static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
	/* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
	kvm_flush_remote_tlbs(kvm);
}
634
/*
 * Drop shadow mappings for a single memslot. No per-slot granularity is
 * implemented here; the whole shadow is flushed instead.
 */
static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
					       const struct kvm_memory_slot *slot)
{
	kvm_trap_emul_flush_shadow_all(kvm);
}
640
/*
 * Register IDs exposed through the KVM_GET/SET_ONE_REG ioctls for the
 * trap & emulate implementation: CP0 state plus the virtual count/timer
 * controls. kvm_trap_emul_num_regs()/copy_reg_indices() report this list
 * to userspace.
 */
static u64 kvm_trap_emul_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};
676
/* Number of registers exposed to userspace via the ONE_REG interface. */
static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
}
681
/*
 * Copy the list of supported ONE_REG register IDs to userspace.
 * Returns 0 on success or -EFAULT on a bad user buffer.
 */
static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
					  u64 __user *indices)
{
	if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
			 sizeof(kvm_trap_emul_get_one_regs)))
		return -EFAULT;
	/*
	 * NOTE(review): this advance of 'indices' has no further use in this
	 * function — presumably kept so additional register groups can be
	 * appended after it; confirm before removing.
	 */
	indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);

	return 0;
}
692
/*
 * Read one emulated register for the KVM_GET_ONE_REG ioctl.
 *
 * Most CP0 registers are read straight from the shadow cop0 context and
 * sign-extended into *v; COUNT and the COUNT_CTL/RESUME/HZ pseudo
 * registers go through the virtual timer state instead.
 *
 * Returns 0 on success or -EINVAL for an unknown register ID.
 */
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		*v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		*v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = (long)kvm_read_c0_guest_ebase(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		*v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		*v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		*v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		*v = (long)kvm_read_c0_guest_config4(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		*v = (long)kvm_read_c0_guest_config5(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		*v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		/* Virtual count: derived from the running timer. */
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		*v = (long)kvm_read_c0_guest_kscratch1(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		*v = (long)kvm_read_c0_guest_kscratch2(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		*v = (long)kvm_read_c0_guest_kscratch3(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		*v = (long)kvm_read_c0_guest_kscratch4(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		*v = (long)kvm_read_c0_guest_kscratch5(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		*v = (long)kvm_read_c0_guest_kscratch6(cop0);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
801
/*
 * Set a single guest register (KVM_SET_ONE_REG ioctl backend).
 *
 * Most CP0 registers are written straight into the guest CP0 shadow state.
 * Registers tied to the emulated timer (Count, Compare, and the Cause DC/TI
 * bits) and the KVM timer pseudo-registers (COUNT_CTL/COUNT_RESUME/COUNT_HZ)
 * are routed through the kvm_mips_* timer helpers so the timer emulation
 * stays consistent with the register state.
 *
 * Returns 0 on success, -EINVAL for an unrecognised register id, or an error
 * code from one of the timer helpers.
 */
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		/*
		 * Allow core number to be written, but the exception base must
		 * remain in guest KSeg0.
		 */
		kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
					  v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		/* Only bits covered by the writable mask may change */
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		/* writes ignored */
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		kvm_write_c0_guest_kscratch1(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		kvm_write_c0_guest_kscratch2(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		kvm_write_c0_guest_kscratch3(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		kvm_write_c0_guest_kscratch4(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		kvm_write_c0_guest_kscratch5(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		kvm_write_c0_guest_kscratch6(cop0, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
954
/*
 * VCPU scheduled in on @cpu.
 *
 * If we are being scheduled back in while in guest context (PF_VCPU set),
 * restore the guest GVA address space: pick the kernel or user guest MM
 * according to the guest's current mode, regenerate its host ASID if the
 * version is stale on this CPU, and activate it (EntryHi + TLB refill PGD)
 * before suspending the host MM.
 */
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;

	/*
	 * Were we in guest context? If so, restore the appropriate ASID based
	 * on the mode of the Guest (Kernel/User).
	 */
	if (current->flags & PF_VCPU) {
		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
		/* Stale ASID version for this CPU? Generate a fresh one. */
		if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
		    asid_version_mask(cpu))
			get_new_mmu_context(mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, mm));
		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
		kvm_mips_suspend_mm(cpu);
		/* Execution hazard barrier before the CP0 change takes effect */
		ehb();
	}

	return 0;
}
978
/*
 * VCPU scheduled out on @cpu.
 *
 * Releases any guest FPU/MSA hardware context via kvm_lose_fpu(), and if we
 * were in guest context (PF_VCPU set) switches the hardware back to the
 * normal Linux process address space (ASID, TLB refill PGD, host MM).
 */
static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_lose_fpu(vcpu);

	if (current->flags & PF_VCPU) {
		/* Restore normal Linux process memory map */
		if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
		     asid_version_mask(cpu)))
			get_new_mmu_context(current->mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, current->mm));
		TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
		kvm_mips_resume_mm(cpu);
		/* Execution hazard barrier before the CP0 change takes effect */
		ehb();
	}

	return 0;
}
996
/**
 * kvm_trap_emul_check_requests() - Handle pending VCPU requests.
 * @vcpu:	VCPU pointer.
 * @cpu:	CPU the VCPU is (about to be) running on.
 * @reload_asid:	Whether to immediately regenerate and activate the ASID
 *			for the current guest mode after a TLB flush.
 *
 * Handles KVM_REQ_TLB_FLUSH by flushing both the guest kernel and guest user
 * GVA page tables (including GPA mappings) and zeroing their MM contexts on
 * all possible CPUs, forcing stale ASIDs to be regenerated before next use.
 */
static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
					 bool reload_asid)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	int i;

	/* Fast path: nothing requested */
	if (likely(!vcpu->requests))
		return;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/*
		 * Both kernel & user GVA mappings must be invalidated. The
		 * caller is just about to check whether the ASID is stale
		 * anyway so no need to reload it here.
		 */
		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
		kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
		for_each_possible_cpu(i) {
			cpu_context(i, kern_mm) = 0;
			cpu_context(i, user_mm) = 0;
		}

		/* Generate new ASID for current mode */
		if (reload_asid) {
			mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
			get_new_mmu_context(mm, cpu);
			/* Stop the HTW while changing EntryHi/PGD */
			htw_stop();
			write_c0_entryhi(cpu_asid(cpu, mm));
			TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
			htw_start();
		}
	}
}
1032
/**
 * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Call before a GVA space access outside of guest mode, to ensure that
 * asynchronous TLB flush requests are handled or delayed until completion of
 * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
 *
 * Should be called with IRQs already enabled. Returns with IRQs disabled;
 * they are re-enabled by kvm_trap_emul_gva_lockless_end().
 */
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
{
	/* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
	WARN_ON_ONCE(irqs_disabled());

	/*
	 * The caller is about to access the GVA space, so we set the mode to
	 * force TLB flush requests to send an IPI, and also disable IRQs to
	 * delay IPI handling until kvm_trap_emul_gva_lockless_end().
	 */
	local_irq_disable();

	/*
	 * Make sure the read of VCPU requests is not reordered ahead of the
	 * write to vcpu->mode, or we could miss a TLB flush request while
	 * the requester sees the VCPU as outside of guest mode and not needing
	 * an IPI.
	 */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);

	/*
	 * If a TLB flush has been requested (potentially while
	 * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
	 * before accessing the GVA space, and be sure to reload the ASID if
	 * necessary as it'll be immediately used.
	 *
	 * TLB flush requests after this check will trigger an IPI due to the
	 * mode change above, which will be delayed due to IRQs disabled.
	 */
	kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
}
1074
/**
 * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Called after a GVA space access outside of guest mode. Should have a matching
 * call to kvm_trap_emul_gva_lockless_begin(). Re-enables the IRQs that the
 * matching begin disabled.
 */
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of GVA
	 * accesses, or a TLB flush requester may not think it necessary to send
	 * an IPI.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);

	/*
	 * Now that the access to GVA space is complete, its safe for pending
	 * TLB flush request IPIs to be handled (which indicates completion).
	 */
	local_irq_enable();
}
1097
/*
 * Prepare to re-enter the guest.
 *
 * Runs with IRQs disabled (see the note below). Handles pending VCPU
 * requests, performs the lazy invalidation of the guest user address space
 * when the guest ASID (EntryHi.ASID) has changed since guest user mode last
 * ran, and finally regenerates the host ASID of the mode we are about to
 * enter if it is stale on this CPU.
 */
static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
				       struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int i, cpu = smp_processor_id();
	unsigned int gasid;

	/*
	 * No need to reload ASID, IRQs are disabled already so there's no rush,
	 * and we'll check if we need to regenerate below anyway before
	 * re-entering the guest.
	 */
	kvm_trap_emul_check_requests(vcpu, cpu, false);

	if (KVM_GUEST_KERNEL_MODE(vcpu)) {
		mm = kern_mm;
	} else {
		mm = user_mm;

		/*
		 * Lazy host ASID regeneration / PT flush for guest user mode.
		 * If the guest ASID has changed since the last guest usermode
		 * execution, invalidate the stale TLB entries and flush GVA PT
		 * entries too.
		 */
		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
		if (gasid != vcpu->arch.last_user_gasid) {
			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
			for_each_possible_cpu(i)
				cpu_context(i, user_mm) = 0;
			vcpu->arch.last_user_gasid = gasid;
		}
	}

	/*
	 * Check if ASID is stale. This may happen due to a TLB flush request or
	 * a lazy user MM invalidation.
	 */
	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
	    asid_version_mask(cpu))
		get_new_mmu_context(mm, cpu);
}
1143
/*
 * Run the VCPU until it exits back to the host.
 *
 * Delivers pending guest interrupts, does the pre-entry work in
 * kvm_trap_emul_vcpu_reenter(), then enters the guest through
 * vcpu->arch.vcpu_run(). While in the guest, Linux page faulting and the
 * hardware page table walker are disabled and the host MM is suspended;
 * all of that is undone on the way out, re-reading the CPU number since we
 * may have migrated while handling guest exits.
 *
 * Returns the value from vcpu->arch.vcpu_run().
 */
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_trap_emul_vcpu_reenter(run, vcpu);

	/*
	 * We use user accessors to access guest memory, but we don't want to
	 * invoke Linux page faulting.
	 */
	pagefault_disable();

	/* Disable hardware page table walking while in guest */
	htw_stop();

	/*
	 * While in guest context we're in the guest's address space, not the
	 * host process address space, so we need to be careful not to confuse
	 * e.g. cache management IPIs.
	 */
	kvm_mips_suspend_mm(cpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	/* We may have migrated while handling guest exits */
	cpu = smp_processor_id();

	/* Restore normal Linux process memory map */
	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu)))
		get_new_mmu_context(current->mm, cpu);
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
	kvm_mips_resume_mm(cpu);

	htw_start();

	pagefault_enable();

	return r;
}
1190
/*
 * Trap & emulate implementation of the MIPS KVM callbacks, installed into
 * the generic MIPS KVM code by kvm_mips_emulation_init().
 */
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

	/* VCPU lifecycle, MMU, interrupt, and register-access operations */
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.flush_shadow_all = kvm_trap_emul_flush_shadow_all,
	.flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.num_regs = kvm_trap_emul_num_regs,
	.copy_reg_indices = kvm_trap_emul_copy_reg_indices,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
	.vcpu_load = kvm_trap_emul_vcpu_load,
	.vcpu_put = kvm_trap_emul_vcpu_put,
	.vcpu_run = kvm_trap_emul_vcpu_run,
	.vcpu_reenter = kvm_trap_emul_vcpu_reenter,
};
1228
1229int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
1230{
1231 *install_callbacks = &kvm_trap_emul_callbacks;
1232 return 0;
1233}