// SPDX-License-Identifier: GPL-2.0
/*
 * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
 *
 * Copyright 2018 Arm Limited
 * Author: Dave Martin <Dave.Martin@arm.com>
 */
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

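/*
 * Drop the hyp mapping of the parent task's FPSIMD state that was
 * shared in kvm_arch_vcpu_run_map_fp(), and release the task_struct
 * reference taken there.  Only relevant for protected KVM, where
 * host memory shared with hyp must be explicitly unshared before it
 * can safely be reused.
 */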
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
{
	struct task_struct *p = vcpu->arch.parent_task;
	struct user_fpsimd_state *fpsimd;

	if (!is_protected_kvm_enabled() || !p)
		return;

	fpsimd = &p->thread.uw.fpsimd_state;
	kvm_unshare_hyp(fpsimd, fpsimd + 1);
	put_task_struct(p);
}

/*
 * Called on entry to KVM_RUN unless this vcpu previously ran at least
 * once and the most recent prior KVM_RUN for this vcpu was called from
 * the same task as current (highly likely).
 *
 * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
 * such that on entering hyp the relevant parts of current are already
 * mapped.
 */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
	struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
	int ret;

	kvm_vcpu_unshare_task_fp(vcpu);

	/* Make sure the host task fpsimd state is visible to hyp: */
	ret = kvm_share_hyp(fpsimd, fpsimd + 1);
	if (ret)
		return ret;

	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);

	/*
	 * We need to keep current's task_struct pinned until its data has been
	 * unshared with the hypervisor to make sure it is not re-used by the
	 * kernel and donated to someone else while already shared -- see
	 * kvm_vcpu_unshare_task_fp() for the matching put_task_struct().
	 */
	if (is_protected_kvm_enabled()) {
		get_task_struct(current);
		vcpu->arch.parent_task = current;
	}

	return 0;
}

/*
 * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
 * The actual loading is done by the FPSIMD access trap taken to hyp.
 *
 * Here, we just set the correct metadata to indicate that the FPSIMD
 * state in the cpu regs (if any) belongs to current on the host.
 */
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
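	/*
	 * KVM_RUN is reached via an ioctl from a userspace task, so
	 * current must have an mm; the arm64 syscall entry path also
	 * discards the task's SVE state and clears TIF_SVE, so both
	 * invariants should hold here.
	 */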
	BUG_ON(!current->mm);
	BUG_ON(test_thread_flag(TIF_SVE));

	vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED;
	vcpu->arch.flags |= KVM_ARM64_FP_HOST;

	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
}

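/*
 * Mirror current's TIF_FOREIGN_FPSTATE into the vcpu flags just before
 * running the guest, so that hyp (which cannot safely peek at the host
 * task's thread flags) can tell whether the FPSIMD state currently in
 * the CPU registers is stale.
 */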
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
		vcpu->arch.flags |= KVM_ARM64_FP_FOREIGN_FPSTATE;
	else
		vcpu->arch.flags &= ~KVM_ARM64_FP_FOREIGN_FPSTATE;
}

/*
 * If the guest FPSIMD state was loaded, update the host's context
 * tracking data to mark the CPU FPSIMD regs as dirty and belonging to
 * the vcpu, so that they will be written back if the kernel clobbers
 * them due to kernel-mode NEON before re-entry into the guest.
 */
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
		fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
					 vcpu->arch.sve_state,
					 vcpu->arch.sve_max_vl);

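		/*
		 * The CPU regs now hold live guest state bound above:
		 * clear TIF_FOREIGN_FPSTATE so the host knows they match
		 * the bound context, and set TIF_SVE if they must be
		 * saved back as full SVE state.
		 */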
		clear_thread_flag(TIF_FOREIGN_FPSTATE);
		update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
	}
}

/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct user_fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	local_irq_save(flags);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
		if (vcpu_has_sve(vcpu)) {
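			/* Preserve the guest's ZCR_EL1 before flushing */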
			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);

			/* Restore the VL that was saved when bound to the CPU */
			if (!has_vhe())
				sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
						       SYS_ZCR_EL1);
		}

		fpsimd_save_and_flush_cpu_state();
	} else if (has_vhe() && system_supports_sve()) {
		/*
		 * The FPSIMD/SVE state in the CPU has not been touched, and we
		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
		 * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
		 * for EL0. To avoid spurious traps, restore the trap state
		 * seen by kvm_arch_vcpu_load_fp():
		 */
		if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
			sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
		else
			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
	}

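	/*
	 * TIF_SVE may have been set for the guest by
	 * kvm_arch_vcpu_ctxsync_fp(); the host's own SVE state (if any)
	 * was discarded on entry to KVM_RUN and is not tracked here, so
	 * unconditionally clear the flag again.
	 */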
	update_thread_flag(TIF_SVE, 0);

	local_irq_restore(flags);
}