// SPDX-License-Identifier: GPL-2.0
/*
 * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
 *
 * Copyright 2018 Arm Limited
 * Author: Dave Martin <Dave.Martin@arm.com>
 */
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

/*
 * Called on entry to KVM_RUN unless this vcpu previously ran at least
 * once and the most recent prior KVM_RUN for this vcpu was called from
 * the same task as current (highly likely).
 *
 * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
 * such that on entering hyp the relevant parts of current are already
 * mapped.
 */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
	int ret;

	struct thread_info *ti = &current->thread_info;
	struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;

	/*
	 * Make sure the host task thread flags and fpsimd state are
	 * visible to hyp:
	 */
	ret = create_hyp_mappings(ti, ti + 1, PAGE_HYP);
	if (ret)
		goto error;

	ret = create_hyp_mappings(fpsimd, fpsimd + 1, PAGE_HYP);
	if (ret)
		goto error;

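	/*
	 * Descriptive note: kern_hyp_va() translates a kernel linear-map
	 * address into the address at which hyp sees the same memory, so
	 * the world-switch code can dereference these pointers directly
	 * at EL2 via the mappings created above.
	 */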
	vcpu->arch.host_thread_info = kern_hyp_va(ti);
	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
error:
	return ret;
}
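
/*
 * Illustrative call order on the KVM_RUN path, inferred from the comment
 * above (the real call sites live in the arch-independent run loop, not
 * in this file):
 *
 *	ret = kvm_arch_vcpu_run_map_fp(vcpu);	// map current for hyp
 *	if (ret)
 *		return ret;
 *	kvm_arch_vcpu_load_fp(vcpu);		// then record FP ownership
 */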

/*
 * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
 * The actual loading is done by the FPSIMD access trap taken to hyp.
 *
 * Here, we just set the correct metadata to indicate that the FPSIMD
 * state in the cpu regs (if any) belongs to current on the host.
 *
 * TIF_SVE is backed up here, since it may get clobbered with guest state.
 * This flag is restored by kvm_arch_vcpu_put_fp(vcpu).
 */
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
	BUG_ON(!current->mm);

	vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
			      KVM_ARM64_HOST_SVE_IN_USE |
			      KVM_ARM64_HOST_SVE_ENABLED);
	vcpu->arch.flags |= KVM_ARM64_FP_HOST;

	if (test_thread_flag(TIF_SVE))
		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;

	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
}
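
/*
 * Informal summary of the flags managed above (the authoritative
 * definitions live in <asm/kvm_host.h>):
 *
 *   KVM_ARM64_FP_HOST          - the CPU FP regs currently hold host state
 *   KVM_ARM64_FP_ENABLED       - guest FP state was loaded (set by hyp
 *                                when the guest first traps on FP access)
 *   KVM_ARM64_HOST_SVE_IN_USE  - snapshot of the host task's TIF_SVE
 *   KVM_ARM64_HOST_SVE_ENABLED - snapshot of the CPACR_EL1.ZEN EL0 enable
 */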

/*
 * If the guest FPSIMD state was loaded, update the host's context
 * tracking data to mark the CPU FPSIMD regs as dirty and belonging to
 * vcpu, so that they will be written back if the kernel clobbers them
 * due to kernel-mode NEON before re-entry into the guest.
 */
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
		fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
					 vcpu->arch.sve_state,
					 vcpu->arch.sve_max_vl);

		clear_thread_flag(TIF_FOREIGN_FPSTATE);
		update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
	}
}
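
/*
 * Informal note on the TIF_SVE update above: TIF_SVE is what the host's
 * context-save logic consults to pick the save format, so setting it to
 * match the guest means a later save writes full SVE vector state for an
 * SVE guest and only the FPSIMD V-regs otherwise. The host task's own
 * TIF_SVE is restored later by kvm_arch_vcpu_put_fp().
 */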

/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	bool host_has_sve = system_supports_sve();
	bool guest_has_sve = vcpu_has_sve(vcpu);

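	/*
	 * Mask interrupts for the save/flush below: kernel-mode NEON may
	 * be used from softirq context and could otherwise clobber the
	 * registers between the save and the ownership update.
	 */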
	local_irq_save(flags);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
		fpsimd_save_and_flush_cpu_state();

		if (guest_has_sve)
			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_s(SYS_ZCR_EL12);
	} else if (host_has_sve) {
		/*
		 * The FPSIMD/SVE state in the CPU has not been touched, and we
		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
		 * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
		 * for EL0. To avoid spurious traps, restore the trap state
		 * seen by kvm_arch_vcpu_load_fp():
		 */
		if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
			sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
		else
			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
	}

	update_thread_flag(TIF_SVE,
			   vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);

	local_irq_restore(flags);
}
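
/*
 * For reference, as an informal note rather than a definitive statement
 * of the architecture: CPACR_EL1.ZEN is a two-bit enable field gating
 * SVE accesses from EL0 and EL1, and CPACR_EL1_ZEN_EL0EN is the EL0
 * enable bit (EL0 SVE accesses trap when it is clear). The restore in
 * kvm_arch_vcpu_put_fp() deliberately toggles only that bit, leaving
 * the EL1 enable untouched.
 */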