/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>

#include "trace.h"

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	VCPU_STAT(exits),
	{ NULL }
};

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

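/*
 * Illustrative sketch only (not part of the kernel's logic): userspace
 * builds a core register ID by or-ing the architecture, size and
 * coproc fields with the index that KVM_REG_ARM_CORE_REG() yields, e.g.
 *
 *	__u64 id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *		   KVM_REG_ARM_CORE_REG(regs.regs[0]);
 *
 * core_reg_offset_from_id() above simply masks those fields back off.
 */
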
static int validate_core_offset(const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (KVM_REG_SIZE(reg->id) == size &&
	    IS_ALIGNED(off, size / sizeof(__u32)))
		return 0;

	return -EINVAL;
}

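/*
 * Informational example (not used by the code): fp_regs.vregs[] entries
 * are 128 bits wide, so they must be accessed with KVM_REG_SIZE_U128 at
 * a 128-bit-aligned offset; the case ranges above also match misaligned
 * offsets inside vregs[], which is why the IS_ALIGNED() check is needed
 * to reject them with -EINVAL.
 */
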
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(reg))
		return -EINVAL;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
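
/*
 * Hypothetical userspace counterpart of get_core_reg(), shown only for
 * illustration (vcpu_fd is assumed to be an open vcpu file descriptor):
 *
 *	__u64 x0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
 *			KVM_REG_ARM_CORE |
 *			KVM_REG_ARM_CORE_REG(regs.regs[0]),
 *		.addr = (__u64)&x0,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */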

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(reg))
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
	return err;
}
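
/*
 * Illustrative userspace sketch (not kernel code): a write to pstate
 * must pass the mode checks in set_core_reg(), e.g. to force AArch64
 * EL1h:
 *
 *	__u64 pstate = PSR_MODE_EL1h;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
 *			KVM_REG_ARM_CORE |
 *			KVM_REG_ARM_CORE_REG(regs.pstate),
 *		.addr = (__u64)&pstate,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */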

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_regs) / sizeof(__u32);
}

/*
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
		+ kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append firmware, timer and
 * system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
	int ret;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		if (put_user(core_reg | i, uindices))
			return -EFAULT;
		uindices++;
	}

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
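
/*
 * Informational userspace sketch (not kernel code): the indices copied
 * above are consumed via KVM_GET_REG_LIST. A VMM usually probes the
 * count first (the ioctl fails with E2BIG but updates n), then retries:
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);
 *	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 */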

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	/* Register group 16 means we want a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return get_core_reg(vcpu, reg);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
		return kvm_arm_get_fw_reg(vcpu, reg);

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	/* Register group 16 means we set a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return set_core_reg(vcpu, reg);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
		return kvm_arm_set_fw_reg(vcpu, reg);

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	return 0;
}
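
/*
 * Illustrative userspace example (not kernel code): a VMM can inject an
 * SError without a syndrome through the events API; set_events above
 * then calls kvm_inject_vabt():
 *
 *	struct kvm_vcpu_events ev = {};
 *
 *	ev.exception.serror_pending = 1;
 *	ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev);
 */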

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}
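
/*
 * Illustrative userspace flow (not kernel code): the preferred target
 * is usually fed straight back into vcpu init:
 *
 *	struct kvm_vcpu_init init;
 *
 *	ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 */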

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu: the vCPU pointer
 * @dbg: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}