/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"

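/**
 * kvm_trap_emul_gva_to_gpa_cb() - Convert guest virtual to guest physical.
 * @gva:	Guest virtual address.
 *
 * KSeg0/KSeg1 addresses are translated with CPHYSADDR(), guest KSeg0
 * addresses with KVM_GUEST_CPHYSADDR(). Anything else is unexpected and
 * yields KVM_INVALID_ADDR after dumping the host TLBs for diagnosis.
 *
 * Returns:	The guest physical address corresponding to @gva, or
 *		KVM_INVALID_ADDR if it cannot be determined.
 */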
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	gva_t kseg = KSEGX(gva);
	gva_t gkseg = KVM_GUEST_KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else if (gkseg == KVM_GUEST_KSEG0)
		gpa = KVM_GUEST_CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}

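/**
 * kvm_trap_emul_handle_cop_unusable() - Guest coprocessor unusable exception.
 * @vcpu:	Virtual CPU context.
 *
 * For COP1 (FPU) unusable exceptions, deliver the exception to the guest if
 * it has no FPU or hasn't enabled it, otherwise restore the FPU state with
 * kvm_own_fpu(). Other coprocessor unusable exceptions are passed to the
 * instruction emulator.
 *
 * Returns:	RESUME_GUEST to re-enter the guest, or RESUME_HOST to return
 *		to userspace with the exit reason set in vcpu->run.
 */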
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/* FPU Unusable */
		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
			/*
			 * Unusable/no FPU in guest:
			 * deliver guest COP1 Unusable Exception
			 */
			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
		} else {
			/* Restore FPU state */
			kvm_own_fpu(vcpu);
			er = EMULATE_DONE;
		}
	} else {
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

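/**
 * kvm_mips_bad_load() - Handle a load that faulted outside of guest RAM.
 * @cause:	CP0 Cause register from the exception.
 * @opc:	Guest PC of the faulting instruction.
 * @run:	kvm_run structure for reporting the exit to userspace.
 * @vcpu:	Virtual CPU context.
 *
 * Fetch the faulting instruction (stepping past the branch if it was in a
 * delay slot) and emulate the load as MMIO. A code fetch fault is not MMIO
 * and is reported as an internal error instead.
 *
 * Returns:	RESUME_HOST, with run->exit_reason set to KVM_EXIT_MMIO on
 *		success or KVM_EXIT_INTERNAL_ERROR on failure.
 */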
static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
			     struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	int err;

	/* A code fetch fault doesn't count as an MMIO */
	if (kvm_is_ifetch_fault(&vcpu->arch)) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Emulate the load */
	er = kvm_mips_emulate_load(inst, cause, run, vcpu);
	if (er == EMULATE_FAIL) {
		kvm_err("Emulate load from MMIO space failed\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		run->exit_reason = KVM_EXIT_MMIO;
	}
	return RESUME_HOST;
}

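/**
 * kvm_mips_bad_store() - Handle a store that faulted outside of guest RAM.
 * @cause:	CP0 Cause register from the exception.
 * @opc:	Guest PC of the faulting instruction.
 * @run:	kvm_run structure for reporting the exit to userspace.
 * @vcpu:	Virtual CPU context.
 *
 * Fetch the faulting instruction (stepping past the branch if it was in a
 * delay slot) and emulate the store as MMIO.
 *
 * Returns:	RESUME_HOST, with run->exit_reason set to KVM_EXIT_MMIO on
 *		success or KVM_EXIT_INTERNAL_ERROR on failure.
 */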
static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
			      struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	int err;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Emulate the store */
	er = kvm_mips_emulate_store(inst, cause, run, vcpu);
	if (er == EMULATE_FAIL) {
		kvm_err("Emulate store to MMIO space failed\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		run->exit_reason = KVM_EXIT_MMIO;
	}
	return RESUME_HOST;
}

static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
			       struct kvm_vcpu *vcpu, bool store)
{
	if (store)
		return kvm_mips_bad_store(cause, opc, run, vcpu);
	else
		return kvm_mips_bad_load(cause, opc, run, vcpu);
}

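/**
 * kvm_trap_emul_handle_tlb_mod() - Guest TLB modified exception.
 * @vcpu:	Virtual CPU context.
 *
 * A write hit a valid but non-dirty TLB entry. For guest mapped segments the
 * guest TLB is consulted first: a missing or invalid entry indicates a stale
 * host TLB entry (internal error), a clean entry is relayed to the guest as
 * a TLB modified exception, and a dirty entry is passed to the mapped
 * segment TLB fault handler, with MMIO emulation as the fallback for
 * non-writable mappings. Guest KSeg0 faults are handled similarly, and host
 * kernel addresses are always treated as MMIO.
 *
 * Returns:	RESUME_GUEST to re-enter the guest, or RESUME_HOST to return
 *		to userspace with the exit reason set in vcpu->run.
 */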
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	struct kvm_mips_tlb *tlb;
	unsigned long entryhi;
	int index;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		/*
		 * First find the mapping in the guest TLB. If the failure to
		 * write was due to the guest TLB, it should be up to the guest
		 * to handle it.
		 */
		entryhi = (badvaddr & VPN2_MASK) |
			  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
		index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

		/*
		 * These should never happen.
		 * They would indicate stale host TLB entries.
		 */
		if (unlikely(index < 0)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		tlb = vcpu->arch.guest_tlb + index;
		if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/*
		 * Guest entry not dirty? That would explain the TLB modified
		 * exception. Relay that on to the guest so it can handle it.
		 */
		if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
			kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
			return RESUME_GUEST;
		}

		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
							 true))
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, run, vcpu);
		return RESUME_GUEST;
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, run, vcpu);
		return RESUME_GUEST;
	} else {
		/* host kernel addresses are all handled as MMIO */
		return kvm_mips_bad_store(cause, opc, run, vcpu);
	}
}

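/**
 * kvm_trap_emul_handle_tlb_miss() - Guest TLB load/store miss exception.
 * @vcpu:	Virtual CPU context.
 * @store:	Whether the faulting access was a store.
 *
 * Dispatch on the faulting address: the commpage is mapped in for guest
 * kernel mode, user address faults are resolved via the guest TLB (or
 * relayed to the guest), guest KSeg0 faults are handled entirely by KVM, and
 * KSeg0/KSeg1 TLB exceptions in guest kernel mode (possible with EVA) are
 * treated as MMIO. Anything else is an internal error.
 *
 * Returns:	RESUME_GUEST to re-enter the guest, or RESUME_HOST to return
 *		to userspace with the exit reason set in vcpu->run.
 */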
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  store ? "ST" : "LD", cause, opc, badvaddr);

		/*
		 * User Address (UA) fault. This could happen if:
		 * (1) The TLB entry is not present/valid in both the guest
		 *     and shadow host TLBs; in this case we pass the fault on
		 *     to the guest kernel and let it handle it.
		 * (2) The TLB entry is present in the guest TLB but not in
		 *     the shadow host TLB; in this case we inject the mapping
		 *     from the guest TLB into the shadow host TLB.
		 */

		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
			ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		/*
		 * With EVA we may get a TLB exception instead of an address
		 * error when the guest performs MMIO to KSeg1 addresses.
		 */
		ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
	} else {
		kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
			store ? "ST" : "LD", cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}

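/*
 * Address error exceptions: KSeg0/KSeg1 accesses (guest MMIO) are emulated
 * as bad loads/stores below, anything else is unexpected and reported to
 * userspace as an internal error.
 */
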
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		ret = kvm_mips_bad_store(cause, opc, run, vcpu);
	} else {
		kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		ret = kvm_mips_bad_load(cause, opc, run, vcpu);
	} else {
		kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

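/*
 * The remaining exception handlers (syscall, reserved instruction,
 * breakpoint, trap, MSA FPE and FPE) share a common pattern: emulate or
 * deliver the exception to the guest, and report an internal error to
 * userspace if emulation fails.
 */
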
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
		/*
		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
		 * guest reserved instruction exception
		 */
		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
		/* MSA disabled by guest, guest MSA disabled exception */
		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
	} else {
		/* Restore MSA/FPU state */
		kvm_own_msa(vcpu);
		er = EMULATE_DONE;
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

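/**
 * kvm_trap_emul_vcpu_init() - Allocate per-VCPU GVA page tables.
 * @vcpu:	Virtual CPU context.
 *
 * Allocate separate GVA -> HPA page directories for guest kernel mode and
 * guest user mode, freeing the kernel one again if the user one cannot be
 * allocated.
 *
 * Returns:	0 on success, -ENOMEM on allocation failure.
 */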
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

	/*
	 * Allocate GVA -> HPA page tables.
	 * MIPS doesn't use the mm_struct pointer argument.
	 */
	kern_mm->pgd = pgd_alloc(kern_mm);
	if (!kern_mm->pgd)
		return -ENOMEM;

	user_mm->pgd = pgd_alloc(user_mm);
	if (!user_mm->pgd) {
		pgd_free(kern_mm, kern_mm->pgd);
		return -ENOMEM;
	}

	return 0;
}

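/**
 * kvm_mips_emul_free_gva_pt() - Free a GVA page table down to the PTE level.
 * @pgd:	Page directory to free.
 *
 * Walk the user portion of the page directory (below the 0x80000000
 * boundary, so that host kernel page tables copied from init_mm.pgd are left
 * alone) and free each level before freeing the PGD itself.
 */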
static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
	/* Don't free host kernel page tables copied from init_mm.pgd */
	const unsigned long end = 0x80000000;
	unsigned long pgd_va, pud_va, pmd_va;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;

	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
		if (pgd_none(pgd[i]))
			continue;

		pgd_va = (unsigned long)i << PGDIR_SHIFT;
		if (pgd_va >= end)
			break;
		pud = pud_offset(pgd + i, 0);
		for (j = 0; j < PTRS_PER_PUD; j++) {
			if (pud_none(pud[j]))
				continue;

			pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
			if (pud_va >= end)
				break;
			pmd = pmd_offset(pud + j, 0);
			for (k = 0; k < PTRS_PER_PMD; k++) {
				if (pmd_none(pmd[k]))
					continue;

				pmd_va = pud_va | (k << PMD_SHIFT);
				if (pmd_va >= end)
					break;
				pte = pte_offset(pmd + k, 0);
				pte_free_kernel(NULL, pte);
			}
			pmd_free(NULL, pmd);
		}
		pud_free(NULL, pud);
	}
	pgd_free(NULL, pgd);
}

static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}

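/**
 * kvm_trap_emul_vcpu_setup() - Initialise the guest coprocessor 0 state.
 * @vcpu:	Virtual CPU context.
 *
 * Set up the guest's view of the CP0 registers: a PRId matching a MIPS 24kc
 * (or a generic QEMU machine on MIPSr6), Config registers derived from the
 * host with unemulated features masked out, a Status register as at reset
 * (BEV | ERL), and the PC at the guest reset vector.
 *
 * Returns:	0 on success.
 */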
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected
	 */
#ifndef CONFIG_CPU_MIPSR6
	/* r2-r5, simulate a MIPS 24kc */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
	/* r6+, simulate a generic QEMU machine */
	kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
	/*
	 * Have config1, Cacheable, noncoherent, write-back, write allocate.
	 * Endianness, arch revision & virtually tagged icache should match
	 * host.
	 */
	config = read_c0_config() & MIPS_CONF_AR;
	config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
	config |= CONF_BE;
#endif
	if (cpu_has_vtag_icache)
		config |= MIPS_CONF_VI;
	kvm_write_c0_guest_config(cop0, config);

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
		     MIPS_CONF1_WR | MIPS_CONF1_CA);
	kvm_write_c0_guest_config1(cop0, config1);

	/* Have config3, no tertiary/secondary caches implemented */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Have config4, UserLocal */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Have config5 */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* No config6 */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/* Status */
	kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
				       (vcpu_id & MIPS_EBASE_CPUNUM));

	/* Put PC at guest reset vector */
	vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);

	return 0;
}

static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
	/* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_trap_emul_flush_shadow_all(kvm);
}

static u64 kvm_trap_emul_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
					  u64 __user *indices)
{
	if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
			 sizeof(kvm_trap_emul_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);

	return 0;
}

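/**
 * kvm_trap_emul_get_one_reg() - Read one register for KVM_GET_ONE_REG.
 * @vcpu:	Virtual CPU context.
 * @reg:	Register descriptor identifying which register to read.
 * @v:		Output for the (sign extended) register value.
 *
 * Most registers are read straight from the guest CP0 context; CP0_Count and
 * the KVM count control registers go through the dynamic count management
 * helpers.
 *
 * Returns:	0 on success, -EINVAL for unrecognised register IDs.
 */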
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		*v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		*v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = (long)kvm_read_c0_guest_ebase(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		*v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		*v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		*v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		*v = (long)kvm_read_c0_guest_config4(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		*v = (long)kvm_read_c0_guest_config5(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		*v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		*v = (long)kvm_read_c0_guest_kscratch1(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		*v = (long)kvm_read_c0_guest_kscratch2(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		*v = (long)kvm_read_c0_guest_kscratch3(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		*v = (long)kvm_read_c0_guest_kscratch4(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		*v = (long)kvm_read_c0_guest_kscratch5(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		*v = (long)kvm_read_c0_guest_kscratch6(cop0);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

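/**
 * kvm_trap_emul_set_one_reg() - Write one register for KVM_SET_ONE_REG.
 * @vcpu:	Virtual CPU context.
 * @reg:	Register descriptor identifying which register to write.
 * @v:		Value to write.
 *
 * Timer related registers (Count, Compare, Cause.DC and the KVM count
 * controls) are routed through the count management helpers so the emulated
 * timer stays consistent. Writes to the Config registers are masked to the
 * writable bits, EBase is constrained to guest KSeg0, and Config and Config2
 * remain read-only for now.
 *
 * Returns:	0 on success, -EINVAL for unrecognised register IDs, or a
 *		negative error from the count helpers.
 */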
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		/*
		 * Allow core number to be written, but the exception base must
		 * remain in guest KSeg0.
		 */
		kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
					  v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		/* writes ignored */
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		kvm_write_c0_guest_kscratch1(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		kvm_write_c0_guest_kscratch2(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		kvm_write_c0_guest_kscratch3(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		kvm_write_c0_guest_kscratch4(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		kvm_write_c0_guest_kscratch5(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		kvm_write_c0_guest_kscratch6(cop0, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

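/**
 * kvm_trap_emul_vcpu_load() - Prepare a VCPU for use on this physical CPU.
 * @vcpu:	Virtual CPU context.
 * @cpu:	Physical CPU being loaded onto.
 *
 * If we were scheduled out while in guest context (PF_VCPU set), restore the
 * GVA address space for the current guest mode: regenerate the ASID if it is
 * stale, switch EntryHi and the TLB refill PGD over to the guest mm, and
 * suspend the host mm.
 */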
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;

	/*
	 * Were we in guest context? If so, restore the appropriate ASID based
	 * on the mode of the Guest (Kernel/User).
	 */
	if (current->flags & PF_VCPU) {
		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
		if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
		    asid_version_mask(cpu))
			get_new_mmu_context(mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, mm));
		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
		kvm_mips_suspend_mm(cpu);
		ehb();
	}

	return 0;
}

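/**
 * kvm_trap_emul_vcpu_put() - Save VCPU state before scheduling out.
 * @vcpu:	Virtual CPU context.
 * @cpu:	Physical CPU being unloaded from.
 *
 * Relinquish the FPU/MSA state and, if we were in guest context, restore the
 * normal Linux process address space (EntryHi, TLB refill PGD) and resume
 * the host mm.
 */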
static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_lose_fpu(vcpu);

	if (current->flags & PF_VCPU) {
		/* Restore normal Linux process memory map */
		if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
		     asid_version_mask(cpu)))
			get_new_mmu_context(current->mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, current->mm));
		TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
		kvm_mips_resume_mm(cpu);
		ehb();
	}

	return 0;
}

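/**
 * kvm_trap_emul_check_requests() - Handle pending VCPU requests.
 * @vcpu:	Virtual CPU context.
 * @cpu:	Physical CPU number.
 * @reload_asid:	Whether to reload the ASID and PGD for the current
 *			guest mode after handling a TLB flush request.
 *
 * Handle KVM_REQ_TLB_FLUSH by flushing both kernel and user GVA page tables
 * and invalidating the GVA ASIDs on all CPUs, optionally regenerating and
 * reloading the ASID for the current guest mode.
 */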
static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
					 bool reload_asid)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	int i;

	if (likely(!vcpu->requests))
		return;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/*
		 * Both kernel & user GVA mappings must be invalidated. The
		 * caller is just about to check whether the ASID is stale
		 * anyway so no need to reload it here.
		 */
		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
		kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
		for_each_possible_cpu(i) {
			cpu_context(i, kern_mm) = 0;
			cpu_context(i, user_mm) = 0;
		}

		/* Generate new ASID for current mode */
		if (reload_asid) {
			mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
			get_new_mmu_context(mm, cpu);
			htw_stop();
			write_c0_entryhi(cpu_asid(cpu, mm));
			TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
			htw_start();
		}
	}
}

/**
 * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Call before a GVA space access outside of guest mode, to ensure that
 * asynchronous TLB flush requests are handled or delayed until completion of
 * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
 *
 * Should be called with IRQs already enabled.
 */
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
{
	/* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
	WARN_ON_ONCE(irqs_disabled());

	/*
	 * The caller is about to access the GVA space, so we set the mode to
	 * force TLB flush requests to send an IPI, and also disable IRQs to
	 * delay IPI handling until kvm_trap_emul_gva_lockless_end().
	 */
	local_irq_disable();

	/*
	 * Make sure the read of VCPU requests is not reordered ahead of the
	 * write to vcpu->mode, or we could miss a TLB flush request while
	 * the requester sees the VCPU as outside of guest mode and not needing
	 * an IPI.
	 */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);

	/*
	 * If a TLB flush has been requested (potentially while
	 * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
	 * before accessing the GVA space, and be sure to reload the ASID if
	 * necessary as it'll be immediately used.
	 *
	 * TLB flush requests after this check will trigger an IPI due to the
	 * mode change above, which will be delayed due to IRQs disabled.
	 */
	kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
}

/**
 * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Called after a GVA space access outside of guest mode. Should have a matching
 * call to kvm_trap_emul_gva_lockless_begin().
 */
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of GVA
	 * accesses, or a TLB flush requester may not think it necessary to send
	 * an IPI.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);

	/*
	 * Now that the access to GVA space is complete, it's safe for pending
	 * TLB flush request IPIs to be handled (which indicates completion).
	 */
	local_irq_enable();
}

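/**
 * kvm_trap_emul_vcpu_reenter() - Prepare to re-enter the guest.
 * @run:	kvm_run structure.
 * @vcpu:	Virtual CPU context.
 *
 * Handle pending requests, lazily invalidate the guest user mm when the
 * guest ASID has changed since the last guest user-mode execution, and
 * regenerate the host ASID for the current guest mode if it has become
 * stale.
 */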
static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
				       struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int i, cpu = smp_processor_id();
	unsigned int gasid;

	/*
	 * No need to reload ASID, IRQs are disabled already so there's no rush,
	 * and we'll check if we need to regenerate below anyway before
	 * re-entering the guest.
	 */
	kvm_trap_emul_check_requests(vcpu, cpu, false);

	if (KVM_GUEST_KERNEL_MODE(vcpu)) {
		mm = kern_mm;
	} else {
		mm = user_mm;

		/*
		 * Lazy host ASID regeneration / PT flush for guest user mode.
		 * If the guest ASID has changed since the last guest usermode
		 * execution, invalidate the stale TLB entries and flush GVA PT
		 * entries too.
		 */
		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
		if (gasid != vcpu->arch.last_user_gasid) {
			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
			for_each_possible_cpu(i)
				cpu_context(i, user_mm) = 0;
			vcpu->arch.last_user_gasid = gasid;
		}
	}

	/*
	 * Check if ASID is stale. This may happen due to a TLB flush request or
	 * a lazy user MM invalidation.
	 */
	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
	    asid_version_mask(cpu))
		get_new_mmu_context(mm, cpu);
}

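/**
 * kvm_trap_emul_vcpu_run() - Run the guest until it exits.
 * @run:	kvm_run structure.
 * @vcpu:	Virtual CPU context.
 *
 * Deliver pending interrupts, switch into the guest address space (with page
 * faulting and the hardware page table walker disabled), enter the guest via
 * vcpu->arch.vcpu_run(), and restore the normal Linux process address space
 * afterwards, taking care to re-read the CPU number in case of migration.
 *
 * Returns:	The return value of vcpu->arch.vcpu_run().
 */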
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_trap_emul_vcpu_reenter(run, vcpu);

	/*
	 * We use user accessors to access guest memory, but we don't want to
	 * invoke Linux page faulting.
	 */
	pagefault_disable();

	/* Disable hardware page table walking while in guest */
	htw_stop();

	/*
	 * While in guest context we're in the guest's address space, not the
	 * host process address space, so we need to be careful not to confuse
	 * e.g. cache management IPIs.
	 */
	kvm_mips_suspend_mm(cpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	/* We may have migrated while handling guest exits */
	cpu = smp_processor_id();

	/* Restore normal Linux process memory map */
	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu)))
		get_new_mmu_context(current->mm, cpu);
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
	kvm_mips_resume_mm(cpu);

	htw_start();

	pagefault_enable();

	return r;
}

static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.flush_shadow_all = kvm_trap_emul_flush_shadow_all,
	.flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.num_regs = kvm_trap_emul_num_regs,
	.copy_reg_indices = kvm_trap_emul_copy_reg_indices,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
	.vcpu_load = kvm_trap_emul_vcpu_load,
	.vcpu_put = kvm_trap_emul_vcpu_put,
	.vcpu_run = kvm_trap_emul_vcpu_run,
	.vcpu_reenter = kvm_trap_emul_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}