/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"

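/**
 * kvm_trap_emul_gva_to_gpa_cb() - Convert a guest virtual to a guest physical
 * address.
 * @gva:	Guest virtual address.
 *
 * Only unmapped addresses (KSeg0/KSeg1) can be translated directly by masking
 * off the segment bits; for anything else the host TLBs are dumped and
 * KVM_INVALID_ADDR is returned.
 */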
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	gva_t kseg = KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}

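/**
 * kvm_trap_emul_handle_cop_unusable() - Guest used an unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * COP1 (FPU) faults either deliver a Coprocessor Unusable exception to the
 * guest or hand the FPU to the guest via kvm_own_fpu(), depending on whether
 * the guest has an FPU and has enabled it. Faults on other coprocessor
 * numbers are passed to the instruction emulator.
 */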
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/* FPU Unusable */
		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
			/*
			 * Unusable/no FPU in guest:
			 * deliver guest COP1 Unusable Exception
			 */
			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
		} else {
			/* Restore FPU state */
			kvm_own_fpu(vcpu);
			er = EMULATE_DONE;
		}
	} else {
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

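/**
 * kvm_trap_emul_handle_tlb_mod() - Guest hit a TLB modified exception.
 * @vcpu:	Virtual CPU context.
 *
 * TLB modified faults on guest user addresses and KSeg2/3 are forwarded to the
 * TLB modification emulator; faults anywhere else (including guest KSeg0) are
 * reported to userland as internal errors.
 */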
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * XXXKYMA: The guest kernel does not expect to get this fault
		 * when we are not using HIGHMEM. Need to address this in a
		 * HIGHMEM kernel
		 */
		kvm_err("TLB MOD fault not handled, cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	} else {
		kvm_err("Illegal TLB Mod fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

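/**
 * kvm_trap_emul_handle_tlb_miss() - Guest hit a TLB load/store miss.
 * @vcpu:	Virtual CPU context.
 * @store:	Whether the faulting access was a store.
 *
 * Common handler for TLB load and store misses: commpage and guest KSeg0
 * faults are resolved by KVM itself, user and KSeg2/3 faults go through the
 * TLB miss emulator, and unmapped-segment faults from guest kernel mode are
 * emulated as MMIO (the EVA case described below).
 */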
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  store ? "ST" : "LD", cause, opc, badvaddr);

		/*
		 * User Address (UA) fault, this could happen if
		 * (1) TLB entry not present/valid in both Guest and shadow host
		 *     TLBs, in this case we pass on the fault to the guest
		 *     kernel and let it handle it.
		 * (2) TLB entry is present in the Guest TLB but not in the
		 *     shadow, in this case we inject the TLB from the Guest TLB
		 *     into the shadow host TLB
		 */

		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault
		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		/* A code fetch fault doesn't count as an MMIO */
		if (!store && kvm_is_ifetch_fault(&vcpu->arch)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/*
		 * With EVA we may get a TLB exception instead of an address
		 * error when the guest performs MMIO to KSeg1 addresses.
		 */
		kvm_debug("Emulate %s MMIO space\n",
			  store ? "Store to" : "Load from");
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate %s MMIO space failed\n",
				store ? "Store to" : "Load from");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
			store ? "ST" : "LD", cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}

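/**
 * kvm_trap_emul_handle_addr_err_st() - Guest took an address error on a store.
 * @vcpu:	Virtual CPU context.
 *
 * Stores to unmapped segments (KSeg0/KSeg1) from guest kernel mode are treated
 * as MMIO and emulated; any other address error is an internal error.
 */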
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		kvm_debug("Emulate Store to MMIO space\n");
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Store to MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

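/**
 * kvm_trap_emul_handle_addr_err_ld() - Guest took an address error on a load
 * or instruction fetch.
 * @vcpu:	Virtual CPU context.
 *
 * Loads from unmapped segments (KSeg0/KSeg1) are emulated as MMIO, except for
 * code fetch faults, which are reported as internal errors; all other address
 * errors are internal errors too.
 */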
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		/* A code fetch fault doesn't count as an MMIO */
		if (kvm_is_ifetch_fault(&vcpu->arch)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Load from MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		er = EMULATE_FAIL;
	}
	return ret;
}

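/*
 * The remaining guest exceptions (syscall, reserved instruction, break, trap,
 * MSA FPE and FPE) are simply re-delivered to or emulated for the guest by the
 * corresponding kvm_mips_* helpers below; an emulation failure becomes a
 * KVM_EXIT_INTERNAL_ERROR exit to userland.
 */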
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
		/*
		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
		 * guest reserved instruction exception
		 */
		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
		/* MSA disabled by guest, guest MSA disabled exception */
		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
	} else {
		/* Restore MSA/FPU state */
		kvm_own_msa(vcpu);
		er = EMULATE_DONE;
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

	vcpu->arch.kscratch_enabled = 0xfc;

	/*
	 * Allocate GVA -> HPA page tables.
	 * MIPS doesn't use the mm_struct pointer argument.
	 */
	kern_mm->pgd = pgd_alloc(kern_mm);
	if (!kern_mm->pgd)
		return -ENOMEM;

	user_mm->pgd = pgd_alloc(user_mm);
	if (!user_mm->pgd) {
		pgd_free(kern_mm, kern_mm->pgd);
		return -ENOMEM;
	}

	return 0;
}

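/**
 * kvm_mips_emul_free_gva_pt() - Free one of the guest's GVA page tables.
 * @pgd:	Page directory allocated by kvm_trap_emul_vcpu_init().
 *
 * Walk and free the page table levels covering the user address range (below
 * 0x80000000); the upper half of the PGD still points at host kernel page
 * tables copied from init_mm.pgd and must not be freed.
 */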
static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
	/* Don't free host kernel page tables copied from init_mm.pgd */
	const unsigned long end = 0x80000000;
	unsigned long pgd_va, pud_va, pmd_va;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;

	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
		if (pgd_none(pgd[i]))
			continue;

		pgd_va = (unsigned long)i << PGDIR_SHIFT;
		if (pgd_va >= end)
			break;
		pud = pud_offset(pgd + i, 0);
		for (j = 0; j < PTRS_PER_PUD; j++) {
			if (pud_none(pud[j]))
				continue;

			pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
			if (pud_va >= end)
				break;
			pmd = pmd_offset(pud + j, 0);
			for (k = 0; k < PTRS_PER_PMD; k++) {
				if (pmd_none(pmd[k]))
					continue;

				pmd_va = pud_va | (k << PMD_SHIFT);
				if (pmd_va >= end)
					break;
				pte = pte_offset(pmd + k, 0);
				pte_free_kernel(NULL, pte);
			}
			pmd_free(NULL, pmd);
		}
		pud_free(NULL, pud);
	}
	pgd_free(NULL, pgd);
}

static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}

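/**
 * kvm_trap_emul_vcpu_setup() - Initialise the guest's CP0 register state.
 * @vcpu:	Virtual CPU context.
 *
 * Set up the emulated PRId, Config0-7, IntCtl and EBase registers so that the
 * guest comes up seeing a plausible trap-and-emulate machine: a 24Kc-like CPU
 * before MIPSr6, a generic QEMU machine on r6 and above.
 */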
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected
	 */
#ifndef CONFIG_CPU_MIPSR6
	/* r2-r5, simulate a MIPS 24kc */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
	/* r6+, simulate a generic QEMU machine */
	kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
	/*
	 * Have config1, Cacheable, noncoherent, write-back, write allocate.
	 * Endianness, arch revision & virtually tagged icache should match
	 * host.
	 */
	config = read_c0_config() & MIPS_CONF_AR;
	config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
	config |= CONF_BE;
#endif
	if (cpu_has_vtag_icache)
		config |= MIPS_CONF_VI;
	kvm_write_c0_guest_config(cop0, config);

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
		     MIPS_CONF1_WR | MIPS_CONF1_CA);
	kvm_write_c0_guest_config1(cop0, config1);

	/* Have config3, no tertiary/secondary caches implemented */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Have config4, UserLocal */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Have config5 */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* No config6 */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
				       (vcpu_id & MIPS_EBASE_CPUNUM));

	return 0;
}

static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
	/* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_trap_emul_flush_shadow_all(kvm);
}

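/*
 * Trap & emulate exposes no extra ONE_REG registers beyond the common set, so
 * num_regs()/copy_reg_indices() contribute nothing to the register list.
 */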
static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
	return 0;
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
					   u64 __user *indices)
{
	return 0;
}

static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

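/**
 * kvm_trap_emul_vcpu_load() - Prepare a VCPU for running on this CPU.
 * @vcpu:	Virtual CPU context.
 * @cpu:	CPU the VCPU is being loaded onto.
 *
 * If we are returning to guest context (PF_VCPU), switch back to the guest
 * kernel or guest user GVA address space, regenerating the ASID first if it
 * has become stale on this CPU.
 */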
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;

	/*
	 * Were we in guest context? If so, restore the appropriate ASID based
	 * on the mode of the Guest (Kernel/User).
	 */
	if (current->flags & PF_VCPU) {
		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
		if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
		    asid_version_mask(cpu))
			get_new_mmu_context(mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, mm));
		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
		kvm_mips_suspend_mm(cpu);
		ehb();
	}

	return 0;
}

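/**
 * kvm_trap_emul_vcpu_put() - Save VCPU state when descheduling from this CPU.
 * @vcpu:	Virtual CPU context.
 * @cpu:	CPU the VCPU is being unloaded from.
 *
 * Drop the guest's FPU/MSA context from the hardware and, if we were in guest
 * context, restore the normal Linux process address space.
 */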
static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_lose_fpu(vcpu);

	if (current->flags & PF_VCPU) {
		/* Restore normal Linux process memory map */
		if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
		     asid_version_mask(cpu)))
			get_new_mmu_context(current->mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, current->mm));
		TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
		kvm_mips_resume_mm(cpu);
		ehb();
	}

	return 0;
}

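/**
 * kvm_trap_emul_check_requests() - Handle pending VCPU requests.
 * @vcpu:	Virtual CPU context.
 * @cpu:	CPU the VCPU is running on.
 * @reload_asid:	Whether to reload the current guest mode's ASID now.
 *
 * Process a pending KVM_REQ_TLB_FLUSH request by flushing both GVA page
 * tables and invalidating the kernel and user mode ASIDs on all CPUs,
 * optionally regenerating and reloading the ASID for the current guest mode.
 */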
static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
					 bool reload_asid)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	int i;

	if (likely(!vcpu->requests))
		return;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/*
		 * Both kernel & user GVA mappings must be invalidated. The
		 * caller is just about to check whether the ASID is stale
		 * anyway so no need to reload it here.
		 */
		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
		kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
		for_each_possible_cpu(i) {
			cpu_context(i, kern_mm) = 0;
			cpu_context(i, user_mm) = 0;
		}

		/* Generate new ASID for current mode */
		if (reload_asid) {
			mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
			get_new_mmu_context(mm, cpu);
			htw_stop();
			write_c0_entryhi(cpu_asid(cpu, mm));
			TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
			htw_start();
		}
	}
}

/**
 * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Call before a GVA space access outside of guest mode, to ensure that
 * asynchronous TLB flush requests are handled or delayed until completion of
 * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
 *
 * Should be called with IRQs already enabled.
 */
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
{
	/* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
	WARN_ON_ONCE(irqs_disabled());

	/*
	 * The caller is about to access the GVA space, so we set the mode to
	 * force TLB flush requests to send an IPI, and also disable IRQs to
	 * delay IPI handling until kvm_trap_emul_gva_lockless_end().
	 */
	local_irq_disable();

	/*
	 * Make sure the read of VCPU requests is not reordered ahead of the
	 * write to vcpu->mode, or we could miss a TLB flush request while
	 * the requester sees the VCPU as outside of guest mode and not needing
	 * an IPI.
	 */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);

	/*
	 * If a TLB flush has been requested (potentially while
	 * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
	 * before accessing the GVA space, and be sure to reload the ASID if
	 * necessary as it'll be immediately used.
	 *
	 * TLB flush requests after this check will trigger an IPI due to the
	 * mode change above, which will be delayed due to IRQs disabled.
	 */
	kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
}

/**
 * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Called after a GVA space access outside of guest mode. Should have a
 * matching call to kvm_trap_emul_gva_lockless_begin().
 */
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of GVA
	 * accesses, or a TLB flush requester may not think it necessary to send
	 * an IPI.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);

	/*
	 * Now that the access to GVA space is complete, it's safe for pending
	 * TLB flush request IPIs to be handled (which indicates completion).
	 */
	local_irq_enable();
}

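/**
 * kvm_trap_emul_vcpu_reenter() - Prepare to re-enter the guest.
 * @run:	The VCPU run structure.
 * @vcpu:	Virtual CPU context.
 *
 * Called with IRQs disabled just before re-entering the guest: handle any
 * pending requests, lazily invalidate the guest user address space if the
 * guest ASID has changed, and make sure the current mode's host ASID is not
 * stale.
 */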
static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
				       struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int i, cpu = smp_processor_id();
	unsigned int gasid;

	/*
	 * No need to reload ASID, IRQs are disabled already so there's no rush,
	 * and we'll check if we need to regenerate below anyway before
	 * re-entering the guest.
	 */
	kvm_trap_emul_check_requests(vcpu, cpu, false);

	if (KVM_GUEST_KERNEL_MODE(vcpu)) {
		mm = kern_mm;
	} else {
		mm = user_mm;

		/*
		 * Lazy host ASID regeneration / PT flush for guest user mode.
		 * If the guest ASID has changed since the last guest usermode
		 * execution, invalidate the stale TLB entries and flush GVA PT
		 * entries too.
		 */
		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
		if (gasid != vcpu->arch.last_user_gasid) {
			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
			for_each_possible_cpu(i)
				cpu_context(i, user_mm) = 0;
			vcpu->arch.last_user_gasid = gasid;
		}
	}

	/*
	 * Check if ASID is stale. This may happen due to a TLB flush request or
	 * a lazy user MM invalidation.
	 */
	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
	    asid_version_mask(cpu))
		get_new_mmu_context(mm, cpu);
}

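/**
 * kvm_trap_emul_vcpu_run() - Run the guest until it exits to the host.
 * @run:	The VCPU run structure.
 * @vcpu:	Virtual CPU context.
 *
 * Deliver any pending interrupts, switch to the guest address space with page
 * faulting and hardware page table walking disabled, enter the guest, and
 * restore the normal Linux process address space afterwards.
 */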
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_trap_emul_vcpu_reenter(run, vcpu);

	/*
	 * We use user accessors to access guest memory, but we don't want to
	 * invoke Linux page faulting.
	 */
	pagefault_disable();

	/* Disable hardware page table walking while in guest */
	htw_stop();

	/*
	 * While in guest context we're in the guest's address space, not the
	 * host process address space, so we need to be careful not to confuse
	 * e.g. cache management IPIs.
	 */
	kvm_mips_suspend_mm(cpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	/* We may have migrated while handling guest exits */
	cpu = smp_processor_id();

	/* Restore normal Linux process memory map */
	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu)))
		get_new_mmu_context(current->mm, cpu);
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
	kvm_mips_resume_mm(cpu);

	htw_start();

	pagefault_enable();

	return r;
}

static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.flush_shadow_all = kvm_trap_emul_flush_shadow_all,
	.flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.num_regs = kvm_trap_emul_num_regs,
	.copy_reg_indices = kvm_trap_emul_copy_reg_indices,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
	.vcpu_load = kvm_trap_emul_vcpu_load,
	.vcpu_put = kvm_trap_emul_vcpu_put,
	.vcpu_run = kvm_trap_emul_vcpu_run,
	.vcpu_reenter = kvm_trap_emul_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}