// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sigcontext.h>

#include "trace.h"

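/*
 * Layout of the binary stats file descriptor contents: the header,
 * then the id string (KVM_STATS_NAME_SIZE bytes), the descriptor
 * array, and finally the stats data, hence the cumulative offsets
 * computed below.
 */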
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS()
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, hvc_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfe_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
	       off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

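/*
 * Core register IDs encode the register's offset into struct kvm_regs,
 * counted in 32-bit words; strip the arch, size and coproc fields to
 * recover it.
 */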
static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}

static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return NULL;

	if (KVM_REG_SIZE(reg->id) != size)
		return NULL;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
		off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
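		/* regs.regs[] are 64 bits wide: two 32-bit index words each */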
		off /= 2;
		return &vcpu->arch.ctxt.regs.regs[off];

	case KVM_REG_ARM_CORE_REG(regs.sp):
		return &vcpu->arch.ctxt.regs.sp;

	case KVM_REG_ARM_CORE_REG(regs.pc):
		return &vcpu->arch.ctxt.regs.pc;

	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return &vcpu->arch.ctxt.regs.pstate;

	case KVM_REG_ARM_CORE_REG(sp_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);

	case KVM_REG_ARM_CORE_REG(elr_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
		return &vcpu->arch.ctxt.spsr_abt;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
		return &vcpu->arch.ctxt.spsr_und;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
		return &vcpu->arch.ctxt.spsr_irq;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
		return &vcpu->arch.ctxt.spsr_fiq;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
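		/* vregs[] are 128 bits wide: four 32-bit index words each */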
		off /= 4;
		return &vcpu->arch.ctxt.fp_regs.vregs[off];

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return &vcpu->arch.ctxt.fp_regs.fpsr;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return &vcpu->arch.ctxt.fp_regs.fpcr;

	default:
		return NULL;
	}
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it were a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off is the index into that "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	void *addr;
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

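/*
 * Setting a core register needs extra checks: a pstate write must name
 * a mode the vcpu is allowed to run in, and a switch into an AArch32
 * mode requires the general purpose registers to be narrowed to 32 bits.
 */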
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp, *addr;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy(addr, valp, KVM_REG_SIZE(reg->id));

	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
		int i, nr_reg;

		switch (*vcpu_cpsr(vcpu)) {
		/*
		 * Either we are dealing with user mode, where only the
		 * first 15 registers (+ PC) must be narrowed to 32bit.
		 * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
		 */
		case PSR_AA32_MODE_USR:
		case PSR_AA32_MODE_SYS:
			nr_reg = 15;
			break;

		/*
		 * Otherwise, this is a privileged mode, and *all* the
		 * registers must be narrowed to 32bit.
		 */
		default:
			nr_reg = 31;
			break;
		}

		for (i = 0; i < nr_reg; i++)
			vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));

		*vcpu_pc(vcpu) = (u32)*vcpu_pc(vcpu);
	}
out:
	return err;
}

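/*
 * The KVM_REG_ARM64_SVE_VLS pseudo-register is a bitmap of vector
 * quanta (1 VQ = 16 bytes): bit (vq - SVE_VQ_MIN) of vqs[] is set iff
 * a vector length of vq * 16 bytes is supported.
 */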
#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = vcpu_sve_max_vq(vcpu);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}

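/*
 * SVE register IDs pack a slice index and a register number into their
 * low bits: bits [4:0] hold the slice and bits [9:5] the register.
 */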
#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen;	/* User-requested offset and length */
	unsigned int maxlen;		/* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
							SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = vcpu_sve_max_vq(vcpu);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = vcpu_sve_max_vq(vcpu);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}

static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}

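/*
 * The monolithic KVM_GET_REGS/KVM_SET_REGS ioctls are not supported on
 * arm64; registers are accessed individually via KVM_GET_ONE_REG and
 * KVM_SET_ONE_REG instead.
 */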
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

		if (size < 0)
			continue;

		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;

		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;

		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;

		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}

/*
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

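/*
 * Number of SVE register IDs visible through KVM_GET_REG_LIST: all the
 * Z-, P- and FFR registers for each slice, plus the VLS pseudo-register.
 */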
static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}

static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 * @vcpu: the vCPU pointer
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 * @vcpu: the vCPU pointer
 * @uindices: userspace array to copy the indices into
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

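/*
 * vcpu events currently describe a pending SError, optionally with a
 * user-specified syndrome when the RAS extension is present, and (on
 * the set side only) a pending external data abort.
 */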
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;
	bool ext_dabt_pending = events->exception.ext_dabt_pending;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	if (ext_dabt_pending)
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

	return 0;
}

u32 __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	u32 target = kvm_target_cpu();

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu: the vCPU to configure
 * @dbg: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

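/*
 * Copy MTE allocation tags between userspace and guest memory, one
 * page at a time. On partial progress the number of bytes already
 * handled is returned, so that userspace can retry the remainder.
 */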
long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
				struct kvm_arm_copy_mte_tags *copy_tags)
{
	gpa_t guest_ipa = copy_tags->guest_ipa;
	size_t length = copy_tags->length;
	void __user *tags = copy_tags->addr;
	gpa_t gfn;
	bool write = !(copy_tags->flags & KVM_ARM_TAGS_FROM_GUEST);
	int ret = 0;

	if (!kvm_has_mte(kvm))
		return -EINVAL;

	if (copy_tags->reserved[0] || copy_tags->reserved[1])
		return -EINVAL;

	if (copy_tags->flags & ~KVM_ARM_TAGS_FROM_GUEST)
		return -EINVAL;

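	/* Tags are handled a page at a time: IPA and length must be page aligned */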
	if (length & ~PAGE_MASK || guest_ipa & ~PAGE_MASK)
		return -EINVAL;

	gfn = gpa_to_gfn(guest_ipa);

	mutex_lock(&kvm->slots_lock);

	while (length > 0) {
		kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
		void *maddr;
		unsigned long num_tags;
		struct page *page;

		if (is_error_noslot_pfn(pfn)) {
			ret = -EFAULT;
			goto out;
		}

		page = pfn_to_online_page(pfn);
		if (!page) {
			/* Reject ZONE_DEVICE memory */
			ret = -EFAULT;
			goto out;
		}
		maddr = page_address(page);

		if (!write) {
			if (test_bit(PG_mte_tagged, &page->flags))
				num_tags = mte_copy_tags_to_user(tags, maddr,
							MTE_GRANULES_PER_PAGE);
			else
				/* No tags in memory, so write zeros */
				num_tags = MTE_GRANULES_PER_PAGE -
					clear_user(tags, MTE_GRANULES_PER_PAGE);
			kvm_release_pfn_clean(pfn);
		} else {
			num_tags = mte_copy_tags_from_user(maddr, tags,
							MTE_GRANULES_PER_PAGE);

			/*
			 * Set the flag after checking the write
			 * completed fully
			 */
			if (num_tags == MTE_GRANULES_PER_PAGE)
				set_bit(PG_mte_tagged, &page->flags);

			kvm_release_pfn_dirty(pfn);
		}

		if (num_tags != MTE_GRANULES_PER_PAGE) {
			ret = -EFAULT;
			goto out;
		}

		gfn++;
		tags += num_tags;
		length -= PAGE_SIZE;
	}

out:
	mutex_unlock(&kvm->slots_lock);
	/* If some data has been copied report the number of bytes copied */
	if (length != copy_tags->length)
		return copy_tags->length - length;
	return ret;
}