// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kmemleak.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/psci.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace_arm.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/sections.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);

DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);

static bool vgic_present;

static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_ARM_NISV_TO_USER:
		r = 0;
		kvm->arch.return_nisv_io_abort_to_user = true;
		break;
	case KVM_CAP_ARM_MTE:
		mutex_lock(&kvm->lock);
		if (!system_supports_mte() || kvm->created_vcpus) {
			r = -EINVAL;
		} else {
			r = 0;
			kvm->arch.mte_enabled = true;
		}
		mutex_unlock(&kvm->lock);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
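/*
 * Illustrative sketch (not part of this file): userspace would enable one
 * of the capabilities above with the generic KVM_ENABLE_CAP ioctl on the
 * VM fd, e.g. for MTE ("vm_fd" is an assumed descriptor returned by
 * KVM_CREATE_VM, and error handling is omitted):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_MTE };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 * Note that this has to happen before the first vCPU is created, as
 * enforced by the kvm->created_vcpus check above.
 */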
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 111 | |
Marc Zyngier | 5107000 | 2020-04-27 15:15:07 +0100 | [diff] [blame] | 112 | static int kvm_arm_default_max_vcpus(void) |
| 113 | { |
| 114 | return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS; |
| 115 | } |
| 116 | |
Marc Zyngier | 4f1df62 | 2020-11-26 17:27:13 +0000 | [diff] [blame] | 117 | static void set_default_spectre(struct kvm *kvm) |
Marc Zyngier | 23711a5 | 2020-11-10 14:13:06 +0000 | [diff] [blame] | 118 | { |
| 119 | /* |
| 120 | * The default is to expose CSV2 == 1 if the HW isn't affected. |
| 121 | * Although this is a per-CPU feature, we make it global because |
| 122 | * asymmetric systems are just a nuisance. |
| 123 | * |
| 124 | * Userspace can override this as long as it doesn't promise |
| 125 | * the impossible. |
| 126 | */ |
| 127 | if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) |
| 128 | kvm->arch.pfr0_csv2 = 1; |
Marc Zyngier | 4f1df62 | 2020-11-26 17:27:13 +0000 | [diff] [blame] | 129 | if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) |
| 130 | kvm->arch.pfr0_csv3 = 1; |
Marc Zyngier | 23711a5 | 2020-11-10 14:13:06 +0000 | [diff] [blame] | 131 | } |
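/*
 * Sketch of the userspace override mentioned above, for illustration only
 * (the register encoding is an assumption to be checked against
 * <asm/kvm.h>): CSV2/CSV3 live in ID_AA64PFR0_EL1, which can be rewritten
 * with KVM_SET_ONE_REG as long as the new value doesn't claim more
 * mitigation than the host actually has:
 *
 *	u64 val;	// read, modify the CSV2/CSV3 fields, write back
 *	struct kvm_one_reg reg = {
 *		.id   = ARM64_SYS_REG(3, 0, 0, 4, 0),	// ID_AA64PFR0_EL1
 *		.addr = (u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */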

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm: pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

	ret = kvm_arm_setup_stage2(kvm, type);
	if (ret)
		return ret;

	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
	if (ret)
		return ret;

	ret = kvm_share_hyp(kvm, kvm + 1);
	if (ret)
		goto out_free_stage2_pgd;

	kvm_vgic_early_init(kvm);

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();

	set_default_spectre(kvm);

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(&kvm->arch.mmu);
	return ret;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}


/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm: pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	bitmap_free(kvm->arch.pmu_filter);

	kvm_vgic_destroy(kvm);

	kvm_destroy_vcpus(kvm);

	kvm_unshare_hyp(kvm, kvm + 1);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
	case KVM_CAP_ARM_NISV_TO_USER:
	case KVM_CAP_ARM_INJECT_EXT_DABT:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_VCPU_ATTRIBUTES:
	case KVM_CAP_PTP_KVM:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		return KVM_GUESTDBG_VALID_MASK;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * ARM64 treats KVM_CAP_NR_VCPUS differently from all other
		 * architectures, as it does not always bound it to
		 * KVM_CAP_MAX_VCPUS. It should not matter much because
		 * this is just an advisory value.
		 */
		r = min_t(unsigned int, num_online_cpus(),
			  kvm_arm_default_max_vcpus());
		break;
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		if (kvm)
			r = kvm->arch.max_vcpus;
		else
			r = kvm_arm_default_max_vcpus();
		break;
	case KVM_CAP_MSI_DEVID:
		if (!kvm)
			r = -EINVAL;
		else
			r = kvm->arch.vgic.msis_require_devid;
		break;
	case KVM_CAP_ARM_USER_IRQ:
		/*
		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
		 * (bump this number if adding more devices)
		 */
		r = 1;
		break;
	case KVM_CAP_ARM_MTE:
		r = system_supports_mte();
		break;
	case KVM_CAP_STEAL_TIME:
		r = kvm_arm_pvtime_supported();
		break;
	case KVM_CAP_ARM_EL1_32BIT:
		r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1);
		break;
	case KVM_CAP_GUEST_DEBUG_HW_BPS:
		r = get_num_brps();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_WPS:
		r = get_num_wrps();
		break;
	case KVM_CAP_ARM_PMU_V3:
		r = kvm_arm_support_pmu_v3();
		break;
	case KVM_CAP_ARM_INJECT_SERROR_ESR:
		r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
		break;
	case KVM_CAP_ARM_VM_IPA_SIZE:
		r = get_kvm_ipa_limit();
		break;
	case KVM_CAP_ARM_SVE:
		r = system_supports_sve();
		break;
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		r = system_has_full_ptr_auth();
		break;
	default:
		r = 0;
	}

	return r;
}
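/*
 * Illustrative sketch (not part of this file): userspace probes these
 * capabilities with KVM_CHECK_EXTENSION, either on the system fd or, for
 * the kvm-dependent cases above, on a VM fd ("kvm_fd" and "vm_fd" are
 * assumed descriptors):
 *
 *	int ipa_bits  = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
 *	int max_vcpus = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
 */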

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

struct kvm *kvm_arch_alloc_vm(void)
{
	size_t sz = sizeof(struct kvm);

	if (!has_vhe())
		return kzalloc(sz, GFP_KERNEL_ACCOUNT);

	return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO);
}
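/*
 * Why the split above (a best-effort rationale; see the commit history for
 * the authoritative one): with nVHE the VM structure ends up mapped at EL2
 * via kvm_share_hyp() in kvm_arch_init_vm(), which is simplest with
 * linear-map (kzalloc) memory. Under VHE there is no separate EL2 address
 * space to populate, so the large structure can live in vmalloc space and
 * avoid a high-order allocation.
 */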

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		return -EBUSY;

	if (id >= kvm->arch.max_vcpus)
		return -EINVAL;

	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_pmu_vcpu_init(vcpu);

	kvm_arm_reset_debug_ptr(vcpu);

	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;

	err = kvm_vgic_vcpu_init(vcpu);
	if (err)
		return err;

	return kvm_share_hyp(vcpu, vcpu + 1);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
		static_branch_dec(&userspace_irqchip_in_use);

	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kvm_timer_vcpu_terminate(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);

	kvm_arm_vcpu_destroy(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_timer_is_pending(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{

}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{

}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_s2_mmu *mmu;
	int *last_ran;

	mmu = vcpu->arch.hw_mmu;
	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

	/*
	 * We guarantee that both TLBs and I-cache are private to each
	 * vcpu. If we detect that another vcpu from the same VM has
	 * previously run on this physical CPU, call into the
	 * hypervisor code to nuke the relevant contexts.
	 *
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_id) {
		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
		*last_ran = vcpu->vcpu_id;
	}

	vcpu->cpu = cpu;

	kvm_vgic_load(vcpu);
	kvm_timer_vcpu_load(vcpu);
	if (has_vhe())
		kvm_vcpu_load_sysregs_vhe(vcpu);
	kvm_arch_vcpu_load_fp(vcpu);
	kvm_vcpu_pmu_restore_guest(vcpu);
	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);

	if (single_task_running())
		vcpu_clear_wfx_traps(vcpu);
	else
		vcpu_set_wfx_traps(vcpu);

	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
	kvm_arch_vcpu_load_debug_state_flags(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_put_debug_state_flags(vcpu);
	kvm_arch_vcpu_put_fp(vcpu);
	if (has_vhe())
		kvm_vcpu_put_sysregs_vhe(vcpu);
	kvm_timer_vcpu_put(vcpu);
	kvm_vgic_put(vcpu);
	kvm_vcpu_pmu_restore_host(vcpu);

	vcpu->cpu = -1;
}

static void vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
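/*
 * Illustrative sketch (not part of this file): userspace drives the two
 * handlers above through the KVM_GET_MP_STATE/KVM_SET_MP_STATE vCPU
 * ioctls, e.g. to park a vCPU ("vcpu_fd" is an assumed descriptor):
 *
 *	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
 */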

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v: The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !v->arch.power_off && !v->arch.pause);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu_mode_priv(vcpu);
}

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	return *vcpu_pc(vcpu);
}
#endif

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
	preempt_disable();
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
	preempt_enable();
}

/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @vmid: The VMID to check
 *
 * return true if there is a new generation of VMIDs being used
 *
 * The hardware supports a limited set of values with the value zero reserved
 * for the host, so we check if an assigned value belongs to a previous
 * generation, which requires us to assign a new value. If we're the first to
 * use a VMID for the new generation, we must flush necessary caches and TLBs
 * on all CPUs.
 */
static bool need_new_vmid_gen(struct kvm_vmid *vmid)
{
	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
	return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
}

/**
 * update_vmid - Update the vmid with a valid VMID for the current generation
 * @vmid: The stage-2 VMID information struct
 */
static void update_vmid(struct kvm_vmid *vmid)
{
	if (!need_new_vmid_gen(vmid))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(vmid)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

	WRITE_ONCE(vmid->vmid, kvm_next_vmid);
	kvm_next_vmid++;
	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;

	smp_wmb();
	WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));

	spin_unlock(&kvm_vmid_lock);
}
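/*
 * Worked example of the rollover above, assuming 8-bit VMIDs for the sake
 * of illustration: after VMID 255 has been handed out, kvm_next_vmid wraps
 * to 0, so the next allocation bumps kvm_vmid_gen (making every cached
 * VMID, tagged with the old generation, fail need_new_vmid_gen()), forces
 * all vCPUs out of the guest, and flushes the TLBs before VMID 1 opens the
 * new generation. VMID 0 is never handed out, as it is reserved for the
 * host.
 */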

static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;
}

/*
 * Handle both the initialisation that is being done when the vcpu is
 * run for the first time, as well as the updates that must be
 * performed each time we get a new thread dealing with this vcpu.
 */
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;

	if (!kvm_vcpu_initialized(vcpu))
		return -ENOEXEC;

	if (!kvm_arm_vcpu_is_finalized(vcpu))
		return -EPERM;

	ret = kvm_arch_vcpu_run_map_fp(vcpu);
	if (ret)
		return ret;

	if (likely(vcpu_has_run_once(vcpu)))
		return 0;

	kvm_arm_vcpu_init_debug(vcpu);

	if (likely(irqchip_in_kernel(kvm))) {
		/*
		 * Map the VGIC hardware resources before running a vcpu the
		 * first time on this VM.
		 */
		ret = kvm_vgic_map_resources(kvm);
		if (ret)
			return ret;
	}

	ret = kvm_timer_enable(vcpu);
	if (ret)
		return ret;

	ret = kvm_arm_pmu_v3_enable(vcpu);
	if (ret)
		return ret;

	if (!irqchip_in_kernel(kvm)) {
		/*
		 * Tell the rest of the code that there are userspace irqchip
		 * VMs in the wild.
		 */
		static_branch_inc(&userspace_irqchip_in_use);
	}

	/*
	 * Initialize traps for protected VMs.
	 * NOTE: Move to run in EL2 directly, rather than via a hypercall, once
	 * the code is in place for first run initialization at EL2.
	 */
	if (kvm_vm_is_protected(kvm))
		kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);

	return ret;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}

void kvm_arm_halt_guest(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;
		__kvm_vcpu_wake_up(vcpu);
	}
}

static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	rcuwait_wait_event(wait,
			   (!vcpu->arch.power_off) && (!vcpu->arch.pause),
			   TASK_INTERRUPTIBLE);

	if (vcpu->arch.power_off || vcpu->arch.pause) {
		/* Awaken to handle a signal, request we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

	/*
	 * Make sure we will observe a potential reset request if we've
	 * observed a change to the power state. Pairs with the smp_wmb() in
	 * kvm_psci_vcpu_on().
	 */
	smp_rmb();
}

/**
 * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
 * @vcpu: The VCPU pointer
 *
 * Suspend execution of a vCPU until a valid wake event is detected, i.e. until
 * the vCPU is runnable. The vCPU may or may not be scheduled out, depending
 * on when a wake event arrives, e.g. there may already be a pending wake event.
 */
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	/*
	 * Sync back the state of the GIC CPU interface so that we have
	 * the latest PMR and group enables. This ensures that
	 * kvm_arch_vcpu_runnable has up-to-date data to decide whether
	 * we have pending interrupts, e.g. when determining if the
	 * vCPU should block.
	 *
	 * For the same reason, we want to tell GICv4 that we need
	 * doorbells to be signalled, should an interrupt become pending.
	 */
	preempt_disable();
	kvm_vgic_vmcr_sync(vcpu);
	vgic_v4_put(vcpu, true);
	preempt_enable();

	kvm_vcpu_halt(vcpu);
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);

	preempt_disable();
	vgic_v4_load(vcpu);
	preempt_enable();
}

static void check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
			vcpu_req_sleep(vcpu);

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		/*
		 * Clear IRQ_PENDING requests that were made to guarantee
		 * that a VCPU sees new virtual interrupts.
		 */
		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
			kvm_update_stolen_time(vcpu);

		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
			/* The distributor enable bits were changed */
			preempt_disable();
			vgic_v4_put(vcpu, false);
			vgic_v4_load(vcpu);
			preempt_enable();
		}

		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
			kvm_pmu_handle_pmcr(vcpu,
					    __vcpu_sys_reg(vcpu, PMCR_EL0));
	}
}

static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
{
	if (likely(!vcpu_mode_is_32bit(vcpu)))
		return false;

	return !system_supports_32bit_el0() ||
		static_branch_unlikely(&arm64_mismatched_32bit_el0);
}

/**
 * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
 * @vcpu: The VCPU pointer
 * @ret: Pointer to write optional return code
 *
 * Returns: true if the VCPU needs to return to a preemptible + interruptible
 * kernel context and skip guest entry.
 *
 * This function disambiguates between two different types of exits: exits to a
 * preemptible + interruptible kernel context and exits to userspace. For an
 * exit to userspace, this function will write the return code to ret and return
 * true. For an exit to preemptible + interruptible kernel context (i.e. check
 * for pending work and re-enter), return true without writing to ret.
 */
static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If we're using a userspace irqchip, then check if we need
	 * to tell a userspace irqchip about timer or PMU level
	 * changes and if so, exit to userspace (the actual level
	 * state gets updated in kvm_timer_update_run and
	 * kvm_pmu_update_run below).
	 */
	if (static_branch_unlikely(&userspace_irqchip_in_use)) {
		if (kvm_timer_should_notify_user(vcpu) ||
		    kvm_pmu_should_notify_user(vcpu)) {
			*ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
			return true;
		}
	}

	return kvm_request_pending(vcpu) ||
			need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
			xfer_to_guest_mode_work_pending();
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu: The VCPU pointer
 *
 * This function is called through the KVM_RUN ioctl called from user space. It
 * will execute VM code in a loop until the time slice for the process is used
 * up or some emulation is needed from user space, in which case the function
 * will return with return value 0 and with the kvm_run structure filled in
 * with the required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu);
		if (ret)
			return ret;
	}

	vcpu_load(vcpu);

	if (run->immediate_exit) {
		ret = -EINTR;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (!ret)
			ret = 1;

		update_vmid(&vcpu->arch.hw_mmu->vmid);

		check_vcpu_requests(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();

		kvm_pmu_flush_hwstate(vcpu);

		local_irq_disable();

		kvm_vgic_flush_hwstate(vcpu);

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			isb(); /* Ensure work in x_flush_hwstate is committed */
			kvm_pmu_sync_hwstate(vcpu);
			if (static_branch_unlikely(&userspace_irqchip_in_use))
				kvm_timer_sync_user(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			local_irq_enable();
			preempt_enable();
			continue;
		}

		kvm_arm_setup_debug(vcpu);
		kvm_arch_vcpu_ctxflush_fp(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_enter_irqoff();

		ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;
		/*
		 * Back from guest
		 *************************************************************/

		kvm_arm_clear_debug(vcpu);

		/*
		 * We must sync the PMU state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		kvm_pmu_sync_hwstate(vcpu);

		/*
		 * Sync the vgic state before syncing the timer state because
		 * the timer code needs to know if the virtual timer
		 * interrupts are active.
		 */
		kvm_vgic_sync_hwstate(vcpu);

		/*
		 * Sync the timer hardware state before enabling interrupts as
		 * we don't want vtimer interrupts to race with syncing the
		 * timer virtual interrupt state.
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use))
			kvm_timer_sync_user(vcpu);

		kvm_arch_vcpu_ctxsync_fp(vcpu);

Christoffer Dall | b103cc3 | 2016-10-16 20:30:38 +0200 | [diff] [blame] | 920 | /* |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 921 | * We may have taken a host interrupt in HYP mode (i.e.
| 922 | * while executing the guest). This interrupt is still
| 923 | * pending, as we haven't serviced it yet!
| 924 | *
| 925 | * We're now back in host EL1, with interrupts
| 926 | * disabled. Enabling the interrupts now will have
| 927 | * the effect of taking the interrupt again, in host
| 928 | * EL1 this time.
| 929 | */ |
| 930 | local_irq_enable(); |
| 931 | |
| 932 | /* |
Paolo Bonzini | 6edaa53 | 2016-06-15 15:18:26 +0200 | [diff] [blame] | 933 | * We do local_irq_enable() before calling guest_exit() so |
Christoffer Dall | 1b3d546 | 2015-05-28 19:49:10 +0100 | [diff] [blame] | 934 | * that if a timer interrupt hits while running the guest we |
| 935 | * account that tick as being spent in the guest. We enable |
Paolo Bonzini | 6edaa53 | 2016-06-15 15:18:26 +0200 | [diff] [blame] | 936 | * preemption after calling guest_exit() so that if we get |
Christoffer Dall | 1b3d546 | 2015-05-28 19:49:10 +0100 | [diff] [blame] | 937 | * preempted we make sure ticks after that are not counted as
| 938 | * guest time. |
| 939 | */ |
Paolo Bonzini | 6edaa53 | 2016-06-15 15:18:26 +0200 | [diff] [blame] | 940 | guest_exit(); |
Christoffer Dall | b5905dc | 2015-08-30 15:55:22 +0200 | [diff] [blame] | 941 | trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); |
Christoffer Dall | 1b3d546 | 2015-05-28 19:49:10 +0100 | [diff] [blame] | 942 | |
James Morse | 3368bd8 | 2018-01-15 19:39:04 +0000 | [diff] [blame] | 943 | /* Exit types that need handling before we can be preempted */ |
Tianjia Zhang | 74cc7e0 | 2020-06-23 21:14:15 +0800 | [diff] [blame] | 944 | handle_exit_early(vcpu, ret); |
James Morse | 3368bd8 | 2018-01-15 19:39:04 +0000 | [diff] [blame] | 945 | |
Marc Zyngier | abdf584 | 2015-06-08 15:00:28 +0100 | [diff] [blame] | 946 | preempt_enable(); |
| 947 | |
Qais Yousef | 22f5538 | 2020-10-27 21:51:13 +0000 | [diff] [blame] | 948 | /* |
| 949 | * The ARMv8 architecture doesn't give the hypervisor |
| 950 | * a mechanism to prevent a guest from dropping to AArch32 EL0 |
| 951 | * if implemented by the CPU. If we spot the guest in such a
| 952 | * state and decide it wasn't supposed to do so (as with the
| 953 | * asymmetric AArch32 case), return to userspace with
| 954 | * a fatal error. |
| 955 | */ |
Will Deacon | 2f6a49b | 2021-06-08 19:02:56 +0100 | [diff] [blame] | 956 | if (vcpu_mode_is_bad_32bit(vcpu)) { |
Qais Yousef | 22f5538 | 2020-10-27 21:51:13 +0000 | [diff] [blame] | 957 | /* |
| 958 | * As we have caught the guest red-handed, decide that |
| 959 | * it isn't fit for purpose anymore by making the vcpu |
| 960 | * invalid. The VMM can try and fix it by issuing a |
| 961 | * KVM_ARM_VCPU_INIT if it really wants to. |
| 962 | */ |
| 963 | vcpu->arch.target = -1; |
| 964 | ret = ARM_EXCEPTION_IL; |
| 965 | } |
| 966 | |
Tianjia Zhang | 74cc7e0 | 2020-06-23 21:14:15 +0800 | [diff] [blame] | 967 | ret = handle_exit(vcpu, ret); |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 968 | } |
| 969 | |
Alexander Graf | d9e1397 | 2016-09-27 21:08:06 +0200 | [diff] [blame] | 970 | /* Tell userspace about in-kernel device output levels */ |
Christoffer Dall | 3dbbdf7 | 2017-02-01 12:51:52 +0100 | [diff] [blame] | 971 | if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { |
| 972 | kvm_timer_update_run(vcpu); |
| 973 | kvm_pmu_update_run(vcpu); |
| 974 | } |
Alexander Graf | d9e1397 | 2016-09-27 21:08:06 +0200 | [diff] [blame] | 975 | |
Jan H. Schönherr | 20b7035 | 2017-11-24 22:39:01 +0100 | [diff] [blame] | 976 | kvm_sigset_deactivate(vcpu); |
| 977 | |
Zenghui Yu | e3e880b | 2021-05-26 22:18:31 +0800 | [diff] [blame] | 978 | out: |
Marc Zyngier | 26778aa | 2021-05-06 15:20:12 +0100 | [diff] [blame] | 979 | /* |
| 980 | * In the unlikely event that we are returning to userspace |
| 981 | * with pending exceptions or PC adjustment, commit these |
| 982 | * adjustments in order to give userspace a consistent view of |
| 983 | * the vcpu state. Note that this relies on __kvm_adjust_pc() |
| 984 | * being preempt-safe on VHE. |
| 985 | */ |
| 986 | if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION | |
| 987 | KVM_ARM64_INCREMENT_PC))) |
| 988 | kvm_call_hyp(__kvm_adjust_pc, vcpu); |
| 989 | |
Christoffer Dall | accb757 | 2017-12-04 21:35:25 +0100 | [diff] [blame] | 990 | vcpu_put(vcpu); |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 991 | return ret; |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 992 | } |
| 993 | |
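
The entry sequence above hinges on ordering: vcpu->mode is set to IN_GUEST_MODE with a full barrier (smp_store_mb()) before the final request check, so a request posted by another CPU is either observed here or triggers a kick. A minimal user-space sketch of the same store-then-check pattern, using C11 sequentially consistent atomics in place of the kernel primitives (guest_mode, pending_requests and the function names are illustrative, not kernel APIs):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int guest_mode;        /* 0 = outside, 1 = in guest */
    static atomic_int pending_requests;  /* set by a remote "kick" */

    /* Remote side: post a request, then look at the vcpu's mode. */
    static void make_request(void)
    {
            atomic_store(&pending_requests, 1);
            if (atomic_load(&guest_mode))
                    ; /* a real VMM/kernel would IPI here to force a guest exit */
    }

    /* VCPU side: mirrors smp_store_mb() followed by the request check. */
    static bool try_enter_guest(void)
    {
            atomic_store(&guest_mode, 1);         /* store + full barrier */
            if (atomic_load(&pending_requests)) { /* final request check */
                    atomic_store(&guest_mode, 0); /* back out and handle it */
                    return false;
            }
            return true; /* safe to enter the guest */
    }

Either the vcpu's load sees the request, or the remote load sees IN_GUEST_MODE and kicks; the sequentially consistent pairing rules out both sides missing each other.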
Christoffer Dall | 86ce853 | 2013-01-20 18:28:08 -0500 | [diff] [blame] | 994 | static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) |
| 995 | { |
| 996 | int bit_index; |
| 997 | bool set; |
Christoffer Dall | 3df59d8 | 2017-08-03 12:09:05 +0200 | [diff] [blame] | 998 | unsigned long *hcr; |
Christoffer Dall | 86ce853 | 2013-01-20 18:28:08 -0500 | [diff] [blame] | 999 | |
| 1000 | if (number == KVM_ARM_IRQ_CPU_IRQ) |
| 1001 | bit_index = __ffs(HCR_VI); |
| 1002 | else /* KVM_ARM_IRQ_CPU_FIQ */ |
| 1003 | bit_index = __ffs(HCR_VF); |
| 1004 | |
Christoffer Dall | 3df59d8 | 2017-08-03 12:09:05 +0200 | [diff] [blame] | 1005 | hcr = vcpu_hcr(vcpu); |
Christoffer Dall | 86ce853 | 2013-01-20 18:28:08 -0500 | [diff] [blame] | 1006 | if (level) |
Christoffer Dall | 3df59d8 | 2017-08-03 12:09:05 +0200 | [diff] [blame] | 1007 | set = test_and_set_bit(bit_index, hcr); |
Christoffer Dall | 86ce853 | 2013-01-20 18:28:08 -0500 | [diff] [blame] | 1008 | else |
Christoffer Dall | 3df59d8 | 2017-08-03 12:09:05 +0200 | [diff] [blame] | 1009 | set = test_and_clear_bit(bit_index, hcr); |
Christoffer Dall | 86ce853 | 2013-01-20 18:28:08 -0500 | [diff] [blame] | 1010 | |
| 1011 | /* |
| 1012 | * If we didn't change anything, no need to wake up or kick other CPUs |
| 1013 | */ |
| 1014 | if (set == level) |
| 1015 | return 0; |
| 1016 | |
| 1017 | /* |
| 1018 | * The vcpu's virtual interrupt state was updated: wake up sleeping
| 1019 | * VCPUs and trigger a world-switch round on the running physical CPU
| 1020 | * to set the virtual IRQ/FIQ fields in the HCR appropriately.
| 1021 | */ |
Andrew Jones | 325f9c6 | 2017-06-04 14:43:59 +0200 | [diff] [blame] | 1022 | kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); |
Christoffer Dall | 86ce853 | 2013-01-20 18:28:08 -0500 | [diff] [blame] | 1023 | kvm_vcpu_kick(vcpu); |
| 1024 | |
| 1025 | return 0; |
| 1026 | } |
| 1027 | |
Alexander Graf | 79558f1 | 2013-04-16 19:21:41 +0200 | [diff] [blame] | 1028 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, |
| 1029 | bool line_status) |
Christoffer Dall | 86ce853 | 2013-01-20 18:28:08 -0500 | [diff] [blame] | 1030 | { |
| 1031 | u32 irq = irq_level->irq; |
| 1032 | unsigned int irq_type, vcpu_idx, irq_num; |
| 1033 | int nrcpus = atomic_read(&kvm->online_vcpus); |
| 1034 | struct kvm_vcpu *vcpu = NULL; |
| 1035 | bool level = irq_level->level; |
| 1036 | |
| 1037 | irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK; |
| 1038 | vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK; |
Marc Zyngier | 92f35b7 | 2019-08-18 14:09:47 +0100 | [diff] [blame] | 1039 | vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1); |
Christoffer Dall | 86ce853 | 2013-01-20 18:28:08 -0500 | [diff] [blame] | 1040 | irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK; |
| 1041 | |
| 1042 | trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level); |
| 1043 | |
Marc Zyngier | 5863c2c | 2013-01-21 19:36:15 -0500 | [diff] [blame] | 1044 | switch (irq_type) { |
| 1045 | case KVM_ARM_IRQ_TYPE_CPU: |
| 1046 | if (irqchip_in_kernel(kvm)) |
| 1047 | return -ENXIO; |
Christoffer Dall | 86ce853 | 2013-01-20 18:28:08 -0500 | [diff] [blame] | 1048 | |
Marc Zyngier | 5863c2c | 2013-01-21 19:36:15 -0500 | [diff] [blame] | 1049 | if (vcpu_idx >= nrcpus) |
| 1050 | return -EINVAL; |
Christoffer Dall | 86ce853 | 2013-01-20 18:28:08 -0500 | [diff] [blame] | 1051 | |
Marc Zyngier | 5863c2c | 2013-01-21 19:36:15 -0500 | [diff] [blame] | 1052 | vcpu = kvm_get_vcpu(kvm, vcpu_idx); |
| 1053 | if (!vcpu) |
| 1054 | return -EINVAL; |
Christoffer Dall | 86ce853 | 2013-01-20 18:28:08 -0500 | [diff] [blame] | 1055 | |
Marc Zyngier | 5863c2c | 2013-01-21 19:36:15 -0500 | [diff] [blame] | 1056 | if (irq_num > KVM_ARM_IRQ_CPU_FIQ) |
| 1057 | return -EINVAL; |
Christoffer Dall | 86ce853 | 2013-01-20 18:28:08 -0500 | [diff] [blame] | 1058 | |
Marc Zyngier | 5863c2c | 2013-01-21 19:36:15 -0500 | [diff] [blame] | 1059 | return vcpu_interrupt_line(vcpu, irq_num, level); |
| 1060 | case KVM_ARM_IRQ_TYPE_PPI: |
| 1061 | if (!irqchip_in_kernel(kvm)) |
| 1062 | return -ENXIO; |
| 1063 | |
| 1064 | if (vcpu_idx >= nrcpus) |
| 1065 | return -EINVAL; |
| 1066 | |
| 1067 | vcpu = kvm_get_vcpu(kvm, vcpu_idx); |
| 1068 | if (!vcpu) |
| 1069 | return -EINVAL; |
| 1070 | |
| 1071 | if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS) |
| 1072 | return -EINVAL; |
| 1073 | |
Christoffer Dall | cb3f0ad | 2017-05-16 12:41:18 +0200 | [diff] [blame] | 1074 | return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL); |
Marc Zyngier | 5863c2c | 2013-01-21 19:36:15 -0500 | [diff] [blame] | 1075 | case KVM_ARM_IRQ_TYPE_SPI: |
| 1076 | if (!irqchip_in_kernel(kvm)) |
| 1077 | return -ENXIO; |
| 1078 | |
Andre Przywara | fd1d0dd | 2015-04-10 16:17:59 +0100 | [diff] [blame] | 1079 | if (irq_num < VGIC_NR_PRIVATE_IRQS) |
Marc Zyngier | 5863c2c | 2013-01-21 19:36:15 -0500 | [diff] [blame] | 1080 | return -EINVAL; |
| 1081 | |
Christoffer Dall | cb3f0ad | 2017-05-16 12:41:18 +0200 | [diff] [blame] | 1082 | return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL); |
Marc Zyngier | 5863c2c | 2013-01-21 19:36:15 -0500 | [diff] [blame] | 1083 | } |
| 1084 | |
| 1085 | return -EINVAL; |
Christoffer Dall | 86ce853 | 2013-01-20 18:28:08 -0500 | [diff] [blame] | 1086 | } |
| 1087 | |
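
For reference, the irq value unpacked above is assembled by user space from shifted fields (type, vcpu index, interrupt number). A hedged sketch of raising a per-vcpu PPI through KVM_IRQ_LINE on an arm64 host, assuming an in-kernel irqchip and a VM fd obtained elsewhere; the shift macros come from the KVM uapi headers:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Assert (level = 1) or clear (level = 0) a PPI on one vcpu. */
    static int set_ppi(int vm_fd, unsigned int vcpu_idx,
                       unsigned int intid, unsigned int level)
    {
            struct kvm_irq_level irq_level = {
                    .irq   = (KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT) |
                             (vcpu_idx << KVM_ARM_IRQ_VCPU_SHIFT) |
                             (intid << KVM_ARM_IRQ_NUM_SHIFT),
                    .level = level,
            };

            return ioctl(vm_fd, KVM_IRQ_LINE, &irq_level);
    }

The PPI branch above requires VGIC_NR_SGIS <= intid < VGIC_NR_PRIVATE_IRQS, i.e. interrupt IDs 16-31.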
Christoffer Dall | f7fa034d | 2014-10-16 16:40:53 +0200 | [diff] [blame] | 1088 | static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, |
| 1089 | const struct kvm_vcpu_init *init) |
| 1090 | { |
Andrew Jones | 811328f | 2019-04-04 19:42:30 +0200 | [diff] [blame] | 1091 | unsigned int i, ret; |
Anshuman Khandual | 6b7982f | 2021-08-12 10:39:53 +0530 | [diff] [blame] | 1092 | u32 phys_target = kvm_target_cpu(); |
Christoffer Dall | f7fa034d | 2014-10-16 16:40:53 +0200 | [diff] [blame] | 1093 | |
| 1094 | if (init->target != phys_target) |
| 1095 | return -EINVAL; |
| 1096 | |
| 1097 | /* |
| 1098 | * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must |
| 1099 | * use the same target. |
| 1100 | */ |
| 1101 | if (vcpu->arch.target != -1 && vcpu->arch.target != init->target) |
| 1102 | return -EINVAL; |
| 1103 | |
| 1104 | /* -ENOENT for unknown features, -EINVAL for invalid combinations. */ |
| 1105 | for (i = 0; i < sizeof(init->features) * 8; i++) { |
| 1106 | bool set = (init->features[i / 32] & (1 << (i % 32))); |
| 1107 | |
| 1108 | if (set && i >= KVM_VCPU_MAX_FEATURES) |
| 1109 | return -ENOENT; |
| 1110 | |
| 1111 | /* |
| 1112 | * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must |
| 1113 | * use the same feature set. |
| 1114 | */ |
| 1115 | if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES && |
| 1116 | test_bit(i, vcpu->arch.features) != set) |
| 1117 | return -EINVAL; |
| 1118 | |
| 1119 | if (set) |
| 1120 | set_bit(i, vcpu->arch.features); |
| 1121 | } |
| 1122 | |
| 1123 | vcpu->arch.target = phys_target; |
| 1124 | |
| 1125 | /* Now we know what it is, we can reset it. */ |
Andrew Jones | 811328f | 2019-04-04 19:42:30 +0200 | [diff] [blame] | 1126 | ret = kvm_reset_vcpu(vcpu); |
| 1127 | if (ret) { |
| 1128 | vcpu->arch.target = -1; |
| 1129 | bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); |
| 1130 | } |
Christoffer Dall | f7fa034d | 2014-10-16 16:40:53 +0200 | [diff] [blame] | 1131 | |
Andrew Jones | 811328f | 2019-04-04 19:42:30 +0200 | [diff] [blame] | 1132 | return ret; |
| 1133 | } |
Christoffer Dall | f7fa034d | 2014-10-16 16:40:53 +0200 | [diff] [blame] | 1134 | |
Christoffer Dall | 478a823 | 2013-11-19 17:43:19 -0800 | [diff] [blame] | 1135 | static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, |
| 1136 | struct kvm_vcpu_init *init) |
| 1137 | { |
| 1138 | int ret; |
| 1139 | |
| 1140 | ret = kvm_vcpu_set_target(vcpu, init); |
| 1141 | if (ret) |
| 1142 | return ret; |
| 1143 | |
Christoffer Dall | 957db10 | 2014-11-27 10:35:03 +0100 | [diff] [blame] | 1144 | /* |
| 1145 | * Ensure a rebooted VM will fault in RAM pages, so we can detect if
| 1146 | * the guest MMU is turned off and flush the caches as needed.
Zenghui Yu | 892713e | 2020-04-15 15:28:35 +0800 | [diff] [blame] | 1147 | * |
Marc Zyngier | 7ae2f3d | 2020-05-30 17:22:19 +0100 | [diff] [blame] | 1148 | * S2FWB enforces all memory accesses to RAM being cacheable, |
| 1149 | * ensuring that the data side is always coherent. We still |
| 1150 | * need to invalidate the I-cache though, as FWB does *not* |
| 1151 | * imply CTR_EL0.DIC. |
Christoffer Dall | 957db10 | 2014-11-27 10:35:03 +0100 | [diff] [blame] | 1152 | */ |
Marc Zyngier | cc5705f | 2021-10-14 12:13:06 +0100 | [diff] [blame] | 1153 | if (vcpu_has_run_once(vcpu)) { |
Marc Zyngier | 7ae2f3d | 2020-05-30 17:22:19 +0100 | [diff] [blame] | 1154 | if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) |
| 1155 | stage2_unmap_vm(vcpu->kvm); |
| 1156 | else |
Fuad Tabba | fade9c2 | 2021-05-24 09:30:01 +0100 | [diff] [blame] | 1157 | icache_inval_all_pou(); |
Marc Zyngier | 7ae2f3d | 2020-05-30 17:22:19 +0100 | [diff] [blame] | 1158 | } |
Christoffer Dall | 957db10 | 2014-11-27 10:35:03 +0100 | [diff] [blame] | 1159 | |
Christoffer Dall | b856a59 | 2014-10-16 17:21:16 +0200 | [diff] [blame] | 1160 | vcpu_reset_hcr(vcpu); |
Fuad Tabba | cd49622 | 2021-08-17 09:11:27 +0100 | [diff] [blame] | 1161 | vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT; |
Christoffer Dall | b856a59 | 2014-10-16 17:21:16 +0200 | [diff] [blame] | 1162 | |
Christoffer Dall | 478a823 | 2013-11-19 17:43:19 -0800 | [diff] [blame] | 1163 | /* |
Eric Auger | 3781528 | 2015-09-25 23:41:14 +0200 | [diff] [blame] | 1164 | * Handle the "start in power-off" case. |
Christoffer Dall | 478a823 | 2013-11-19 17:43:19 -0800 | [diff] [blame] | 1165 | */ |
Christoffer Dall | 03f1d4c | 2014-12-02 15:27:51 +0100 | [diff] [blame] | 1166 | if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) |
Andrew Jones | 424c989 | 2017-06-04 14:43:57 +0200 | [diff] [blame] | 1167 | vcpu_power_off(vcpu); |
Christoffer Dall | 3ad8b3d | 2014-10-16 16:14:43 +0200 | [diff] [blame] | 1168 | else |
Eric Auger | 3781528 | 2015-09-25 23:41:14 +0200 | [diff] [blame] | 1169 | vcpu->arch.power_off = false; |
Christoffer Dall | 478a823 | 2013-11-19 17:43:19 -0800 | [diff] [blame] | 1170 | |
| 1171 | return 0; |
| 1172 | } |
| 1173 | |
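
The canonical user-space flow pairs the KVM_ARM_PREFERRED_TARGET vm ioctl with the KVM_ARM_VCPU_INIT vcpu ioctl, using the features[i / 32] word / bit (i % 32) packing that kvm_vcpu_set_target() validates above. A sketch (error handling minimal; the PSCI 0.2 feature bit is just an example):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int init_vcpu(int vm_fd, int vcpu_fd)
    {
            struct kvm_vcpu_init init;

            /* Ask KVM which target CPU type this host prefers. */
            if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init))
                    return -1;

            /* Request PSCI 0.2: word i/32, bit i%32 of the feature bitmap. */
            init.features[KVM_ARM_VCPU_PSCI_0_2 / 32] |=
                    1U << (KVM_ARM_VCPU_PSCI_0_2 % 32);

            /* Initialize the vcpu; repeat calls must use the same target. */
            return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
    }

The preferred-target call returns a zeroed feature bitmap on current kernels, so only the explicitly requested bits end up set; calling KVM_ARM_VCPU_INIT again later re-runs kvm_reset_vcpu(), which is how a VMM reboots a vcpu.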
Shannon Zhao | f577f6c | 2016-01-11 20:56:17 +0800 | [diff] [blame] | 1174 | static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu, |
| 1175 | struct kvm_device_attr *attr) |
| 1176 | { |
| 1177 | int ret = -ENXIO; |
| 1178 | |
| 1179 | switch (attr->group) { |
| 1180 | default: |
Shannon Zhao | bb0c70b | 2016-01-11 21:35:32 +0800 | [diff] [blame] | 1181 | ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr); |
Shannon Zhao | f577f6c | 2016-01-11 20:56:17 +0800 | [diff] [blame] | 1182 | break; |
| 1183 | } |
| 1184 | |
| 1185 | return ret; |
| 1186 | } |
| 1187 | |
| 1188 | static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu, |
| 1189 | struct kvm_device_attr *attr) |
| 1190 | { |
| 1191 | int ret = -ENXIO; |
| 1192 | |
| 1193 | switch (attr->group) { |
| 1194 | default: |
Shannon Zhao | bb0c70b | 2016-01-11 21:35:32 +0800 | [diff] [blame] | 1195 | ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr); |
Shannon Zhao | f577f6c | 2016-01-11 20:56:17 +0800 | [diff] [blame] | 1196 | break; |
| 1197 | } |
| 1198 | |
| 1199 | return ret; |
| 1200 | } |
| 1201 | |
| 1202 | static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu, |
| 1203 | struct kvm_device_attr *attr) |
| 1204 | { |
| 1205 | int ret = -ENXIO; |
| 1206 | |
| 1207 | switch (attr->group) { |
| 1208 | default: |
Shannon Zhao | bb0c70b | 2016-01-11 21:35:32 +0800 | [diff] [blame] | 1209 | ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr); |
Shannon Zhao | f577f6c | 2016-01-11 20:56:17 +0800 | [diff] [blame] | 1210 | break; |
| 1211 | } |
| 1212 | |
| 1213 | return ret; |
| 1214 | } |
| 1215 | |
James Morse | 539aee0 | 2018-07-19 16:24:24 +0100 | [diff] [blame] | 1216 | static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, |
| 1217 | struct kvm_vcpu_events *events) |
| 1218 | { |
| 1219 | memset(events, 0, sizeof(*events)); |
| 1220 | |
| 1221 | return __kvm_arm_vcpu_get_events(vcpu, events); |
| 1222 | } |
| 1223 | |
| 1224 | static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, |
| 1225 | struct kvm_vcpu_events *events) |
| 1226 | { |
| 1227 | int i; |
| 1228 | |
| 1229 | /* check whether the reserved field is zero */ |
| 1230 | for (i = 0; i < ARRAY_SIZE(events->reserved); i++) |
| 1231 | if (events->reserved[i]) |
| 1232 | return -EINVAL; |
| 1233 | |
| 1234 | /* check whether the pad field is zero */ |
| 1235 | for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++) |
| 1236 | if (events->exception.pad[i]) |
| 1237 | return -EINVAL; |
| 1238 | |
| 1239 | return __kvm_arm_vcpu_set_events(vcpu, events); |
| 1240 | } |
James Morse | 539aee0 | 2018-07-19 16:24:24 +0100 | [diff] [blame] | 1241 | |
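
Because kvm_arm_vcpu_set_events() rejects any non-zero reserved or pad bytes, user space must zero the whole structure before filling in fields. A short sketch, using the SError-pending bit as the example payload:

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int inject_serror(int vcpu_fd)
    {
            struct kvm_vcpu_events events;

            /* Any stale byte in reserved[]/pad[] makes the ioctl fail. */
            memset(&events, 0, sizeof(events));
            events.exception.serror_pending = 1;

            return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
    }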
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1242 | long kvm_arch_vcpu_ioctl(struct file *filp, |
| 1243 | unsigned int ioctl, unsigned long arg) |
| 1244 | { |
| 1245 | struct kvm_vcpu *vcpu = filp->private_data; |
| 1246 | void __user *argp = (void __user *)arg; |
Shannon Zhao | f577f6c | 2016-01-11 20:56:17 +0800 | [diff] [blame] | 1247 | struct kvm_device_attr attr; |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1248 | long r; |
| 1249 | |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1250 | switch (ioctl) { |
| 1251 | case KVM_ARM_VCPU_INIT: { |
| 1252 | struct kvm_vcpu_init init; |
| 1253 | |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1254 | r = -EFAULT; |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1255 | if (copy_from_user(&init, argp, sizeof(init))) |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1256 | break; |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1257 | |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1258 | r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init); |
| 1259 | break; |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1260 | } |
| 1261 | case KVM_SET_ONE_REG: |
| 1262 | case KVM_GET_ONE_REG: { |
| 1263 | struct kvm_one_reg reg; |
Andre Przywara | e8180dc | 2013-05-09 00:28:06 +0200 | [diff] [blame] | 1264 | |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1265 | r = -ENOEXEC; |
Andre Przywara | e8180dc | 2013-05-09 00:28:06 +0200 | [diff] [blame] | 1266 | if (unlikely(!kvm_vcpu_initialized(vcpu))) |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1267 | break; |
Andre Przywara | e8180dc | 2013-05-09 00:28:06 +0200 | [diff] [blame] | 1268 | |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1269 | r = -EFAULT; |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1270 | if (copy_from_user(&reg, argp, sizeof(reg)))
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1271 | break; |
| 1272 | |
Oliver Upton | 6826c68 | 2021-08-18 20:21:31 +0000 | [diff] [blame] | 1273 | /* |
| 1274 | * We could owe a reset due to PSCI. Handle the pending reset |
| 1275 | * here to ensure userspace register accesses are ordered after |
| 1276 | * the reset. |
| 1277 | */ |
| 1278 | if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) |
| 1279 | kvm_reset_vcpu(vcpu); |
| 1280 | |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1281 | if (ioctl == KVM_SET_ONE_REG) |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1282 | r = kvm_arm_set_reg(vcpu, &reg);
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1283 | else
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1284 | r = kvm_arm_get_reg(vcpu, &reg);
| 1285 | break; |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1286 | } |
| 1287 | case KVM_GET_REG_LIST: { |
| 1288 | struct kvm_reg_list __user *user_list = argp; |
| 1289 | struct kvm_reg_list reg_list; |
| 1290 | unsigned n; |
| 1291 | |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1292 | r = -ENOEXEC; |
Andre Przywara | e8180dc | 2013-05-09 00:28:06 +0200 | [diff] [blame] | 1293 | if (unlikely(!kvm_vcpu_initialized(vcpu))) |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1294 | break; |
Andre Przywara | e8180dc | 2013-05-09 00:28:06 +0200 | [diff] [blame] | 1295 | |
Dave Martin | 7dd32a0 | 2018-12-19 14:27:01 +0000 | [diff] [blame] | 1296 | r = -EPERM; |
| 1297 | if (!kvm_arm_vcpu_is_finalized(vcpu)) |
| 1298 | break; |
| 1299 | |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1300 | r = -EFAULT; |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1301 | if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1302 | break; |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1303 | n = reg_list.n; |
| 1304 | reg_list.n = kvm_arm_num_regs(vcpu); |
| 1305 | if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1306 | break; |
| 1307 | r = -E2BIG; |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1308 | if (n < reg_list.n) |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1309 | break; |
| 1310 | r = kvm_arm_copy_reg_indices(vcpu, user_list->reg); |
| 1311 | break; |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1312 | } |
Shannon Zhao | f577f6c | 2016-01-11 20:56:17 +0800 | [diff] [blame] | 1313 | case KVM_SET_DEVICE_ATTR: { |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1314 | r = -EFAULT; |
Shannon Zhao | f577f6c | 2016-01-11 20:56:17 +0800 | [diff] [blame] | 1315 | if (copy_from_user(&attr, argp, sizeof(attr))) |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1316 | break; |
| 1317 | r = kvm_arm_vcpu_set_attr(vcpu, &attr); |
| 1318 | break; |
Shannon Zhao | f577f6c | 2016-01-11 20:56:17 +0800 | [diff] [blame] | 1319 | } |
| 1320 | case KVM_GET_DEVICE_ATTR: { |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1321 | r = -EFAULT; |
Shannon Zhao | f577f6c | 2016-01-11 20:56:17 +0800 | [diff] [blame] | 1322 | if (copy_from_user(&attr, argp, sizeof(attr))) |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1323 | break; |
| 1324 | r = kvm_arm_vcpu_get_attr(vcpu, &attr); |
| 1325 | break; |
Shannon Zhao | f577f6c | 2016-01-11 20:56:17 +0800 | [diff] [blame] | 1326 | } |
| 1327 | case KVM_HAS_DEVICE_ATTR: { |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1328 | r = -EFAULT; |
Shannon Zhao | f577f6c | 2016-01-11 20:56:17 +0800 | [diff] [blame] | 1329 | if (copy_from_user(&attr, argp, sizeof(attr))) |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1330 | break; |
| 1331 | r = kvm_arm_vcpu_has_attr(vcpu, &attr); |
| 1332 | break; |
Shannon Zhao | f577f6c | 2016-01-11 20:56:17 +0800 | [diff] [blame] | 1333 | } |
Dongjiu Geng | b7b27fa | 2018-07-19 16:24:22 +0100 | [diff] [blame] | 1334 | case KVM_GET_VCPU_EVENTS: { |
| 1335 | struct kvm_vcpu_events events; |
| 1336 | |
| 1337 | if (kvm_arm_vcpu_get_events(vcpu, &events)) |
| 1338 | return -EINVAL; |
| 1339 | |
| 1340 | if (copy_to_user(argp, &events, sizeof(events))) |
| 1341 | return -EFAULT; |
| 1342 | |
| 1343 | return 0; |
| 1344 | } |
| 1345 | case KVM_SET_VCPU_EVENTS: { |
| 1346 | struct kvm_vcpu_events events; |
| 1347 | |
| 1348 | if (copy_from_user(&events, argp, sizeof(events))) |
| 1349 | return -EFAULT; |
| 1350 | |
| 1351 | return kvm_arm_vcpu_set_events(vcpu, &events); |
| 1352 | } |
Dave Martin | 7dd32a0 | 2018-12-19 14:27:01 +0000 | [diff] [blame] | 1353 | case KVM_ARM_VCPU_FINALIZE: { |
| 1354 | int what; |
| 1355 | |
| 1356 | if (!kvm_vcpu_initialized(vcpu)) |
| 1357 | return -ENOEXEC; |
| 1358 | |
| 1359 | if (get_user(what, (const int __user *)argp)) |
| 1360 | return -EFAULT; |
| 1361 | |
| 1362 | return kvm_arm_vcpu_finalize(vcpu, what); |
| 1363 | } |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1364 | default: |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1365 | r = -EINVAL; |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1366 | } |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1367 | |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1368 | return r; |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1369 | } |
| 1370 | |
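
The KVM_GET_REG_LIST case above implements the usual two-call protocol: an undersized first call fails with E2BIG but reports the real register count, which then sizes the second call. A sketch:

    #include <errno.h>
    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    static struct kvm_reg_list *get_reg_list(int vcpu_fd)
    {
            struct kvm_reg_list probe = { .n = 0 };
            struct kvm_reg_list *list;

            /* First call: expected to fail with E2BIG, filling in probe.n. */
            if (!ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) || errno != E2BIG)
                    return NULL;

            list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
            if (!list)
                    return NULL;

            /* Second call: the buffer now holds all register indices. */
            list->n = probe.n;
            if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list)) {
                    free(list);
                    return NULL;
            }
            return list;
    }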
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1371 | void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1372 | { |
Mario Smarduch | 53c810c | 2015-01-15 15:58:57 -0800 | [diff] [blame] | 1373 | |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1374 | } |
| 1375 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1376 | void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, |
Paolo Bonzini | 6c9dd6d | 2021-04-02 17:53:09 +0200 | [diff] [blame] | 1377 | const struct kvm_memory_slot *memslot) |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1378 | { |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1379 | kvm_flush_remote_tlbs(kvm); |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1380 | } |
| 1381 | |
Christoffer Dall | 3401d546 | 2013-01-23 13:18:04 -0500 | [diff] [blame] | 1382 | static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, |
| 1383 | struct kvm_arm_device_addr *dev_addr) |
| 1384 | { |
Christoffer Dall | 330690c | 2013-01-21 19:36:13 -0500 | [diff] [blame] | 1385 | unsigned long dev_id, type; |
| 1386 | |
| 1387 | dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >> |
| 1388 | KVM_ARM_DEVICE_ID_SHIFT; |
| 1389 | type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >> |
| 1390 | KVM_ARM_DEVICE_TYPE_SHIFT; |
| 1391 | |
| 1392 | switch (dev_id) { |
| 1393 | case KVM_ARM_DEVICE_VGIC_V2: |
Pavel Fedin | c7da6fa | 2015-12-18 14:38:43 +0300 | [diff] [blame] | 1394 | if (!vgic_present) |
| 1395 | return -ENXIO; |
Christoffer Dall | ce01e4e | 2013-09-23 14:55:56 -0700 | [diff] [blame] | 1396 | return kvm_vgic_addr(kvm, type, &dev_addr->addr, true); |
Christoffer Dall | 330690c | 2013-01-21 19:36:13 -0500 | [diff] [blame] | 1397 | default: |
| 1398 | return -ENODEV; |
| 1399 | } |
Christoffer Dall | 3401d546 | 2013-01-23 13:18:04 -0500 | [diff] [blame] | 1400 | } |
| 1401 | |
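
kvm_vm_ioctl_set_device_addr() splits dev_addr->id into a device id (high field) and an address type (low field). A hedged sketch of the legacy user-space call placing a VGIC v2 distributor (the base address is caller-chosen and illustrative; newer VMMs use the KVM_SET_DEVICE_ATTR interface instead):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int set_vgic_v2_dist(int vm_fd, __u64 base)
    {
            struct kvm_arm_device_addr dev_addr = {
                    /* device id in the high field, address type in the low one */
                    .id = ((__u64)KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
                          KVM_VGIC_V2_ADDR_TYPE_DIST,
                    .addr = base,
            };

            return ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &dev_addr);
    }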
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1402 | long kvm_arch_vm_ioctl(struct file *filp, |
| 1403 | unsigned int ioctl, unsigned long arg) |
| 1404 | { |
Christoffer Dall | 3401d546 | 2013-01-23 13:18:04 -0500 | [diff] [blame] | 1405 | struct kvm *kvm = filp->private_data; |
| 1406 | void __user *argp = (void __user *)arg; |
| 1407 | |
| 1408 | switch (ioctl) { |
Marc Zyngier | 5863c2c | 2013-01-21 19:36:15 -0500 | [diff] [blame] | 1409 | case KVM_CREATE_IRQCHIP: { |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 1410 | int ret; |
Pavel Fedin | c7da6fa | 2015-12-18 14:38:43 +0300 | [diff] [blame] | 1411 | if (!vgic_present) |
| 1412 | return -ENXIO; |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 1413 | mutex_lock(&kvm->lock); |
| 1414 | ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); |
| 1415 | mutex_unlock(&kvm->lock); |
| 1416 | return ret; |
Marc Zyngier | 5863c2c | 2013-01-21 19:36:15 -0500 | [diff] [blame] | 1417 | } |
Christoffer Dall | 3401d546 | 2013-01-23 13:18:04 -0500 | [diff] [blame] | 1418 | case KVM_ARM_SET_DEVICE_ADDR: { |
| 1419 | struct kvm_arm_device_addr dev_addr; |
| 1420 | |
| 1421 | if (copy_from_user(&dev_addr, argp, sizeof(dev_addr))) |
| 1422 | return -EFAULT; |
| 1423 | return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr); |
| 1424 | } |
Anup Patel | 42c4e0c | 2013-09-30 14:20:07 +0530 | [diff] [blame] | 1425 | case KVM_ARM_PREFERRED_TARGET: { |
Anup Patel | 42c4e0c | 2013-09-30 14:20:07 +0530 | [diff] [blame] | 1426 | struct kvm_vcpu_init init; |
| 1427 | |
YueHaibing | 08e873c | 2021-11-05 09:15:00 +0800 | [diff] [blame] | 1428 | kvm_vcpu_preferred_target(&init); |
Anup Patel | 42c4e0c | 2013-09-30 14:20:07 +0530 | [diff] [blame] | 1429 | |
| 1430 | if (copy_to_user(argp, &init, sizeof(init))) |
| 1431 | return -EFAULT; |
| 1432 | |
| 1433 | return 0; |
| 1434 | } |
Steven Price | f0376ed | 2021-06-21 12:17:15 +0100 | [diff] [blame] | 1435 | case KVM_ARM_MTE_COPY_TAGS: { |
| 1436 | struct kvm_arm_copy_mte_tags copy_tags; |
| 1437 | |
| 1438 | if (copy_from_user(&copy_tags, argp, sizeof(copy_tags)))
| 1439 | return -EFAULT; |
| 1440 | return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
| 1441 | } |
Christoffer Dall | 3401d546 | 2013-01-23 13:18:04 -0500 | [diff] [blame] | 1442 | default: |
| 1443 | return -EINVAL; |
| 1444 | } |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1445 | } |
| 1446 | |
David Brazdil | 30c9539 | 2020-09-22 21:49:09 +0100 | [diff] [blame] | 1447 | static unsigned long nvhe_percpu_size(void) |
| 1448 | { |
| 1449 | return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) - |
| 1450 | (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start); |
| 1451 | } |
| 1452 | |
| 1453 | static unsigned long nvhe_percpu_order(void) |
| 1454 | { |
| 1455 | unsigned long size = nvhe_percpu_size(); |
| 1456 | |
| 1457 | return size ? get_order(size) : 0; |
| 1458 | } |
| 1459 | |
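
get_order() rounds a size up to the smallest power-of-two number of pages, so e.g. a 20 KiB per-CPU image on 4 KiB pages yields order 3 (8 pages, 32 KiB). A stand-alone sketch of that rounding, assuming 4 KiB pages:

    #define PAGE_SHIFT 12 /* assumed 4 KiB pages */

    /* Smallest order such that (1 << order) pages covers size bytes. */
    static unsigned int order_for(unsigned long size)
    {
            unsigned int order = 0;

            while ((1UL << (order + PAGE_SHIFT)) < size)
                    order++;
            return order;
    }

For instance, order_for(20 * 1024) evaluates to 3.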
Will Deacon | b881cdc | 2020-11-13 11:38:44 +0000 | [diff] [blame] | 1460 | /* A lookup table holding the hypervisor VA for each vector slot */ |
| 1461 | static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS]; |
Will Deacon | de5bcdb | 2020-11-13 11:38:39 +0000 | [diff] [blame] | 1462 | |
Will Deacon | b881cdc | 2020-11-13 11:38:44 +0000 | [diff] [blame] | 1463 | static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot) |
| 1464 | { |
Quentin Perret | bc1d289 | 2021-03-19 10:01:23 +0000 | [diff] [blame] | 1465 | hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot); |
Will Deacon | b881cdc | 2020-11-13 11:38:44 +0000 | [diff] [blame] | 1466 | } |
| 1467 | |
| 1468 | static int kvm_init_vector_slots(void) |
| 1469 | { |
| 1470 | int err; |
| 1471 | void *base; |
| 1472 | |
| 1473 | base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector)); |
| 1474 | kvm_init_vector_slot(base, HYP_VECTOR_DIRECT); |
| 1475 | |
| 1476 | base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs)); |
| 1477 | kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT); |
| 1478 | |
Will Deacon | c4792b6 | 2020-11-13 11:38:45 +0000 | [diff] [blame] | 1479 | if (!cpus_have_const_cap(ARM64_SPECTRE_V3A)) |
Will Deacon | de5bcdb | 2020-11-13 11:38:39 +0000 | [diff] [blame] | 1480 | return 0; |
Will Deacon | 9ef2b48 | 2020-09-28 11:45:24 +0100 | [diff] [blame] | 1481 | |
Will Deacon | b881cdc | 2020-11-13 11:38:44 +0000 | [diff] [blame] | 1482 | if (!has_vhe()) { |
| 1483 | err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs), |
| 1484 | __BP_HARDEN_HYP_VECS_SZ, &base); |
| 1485 | if (err) |
| 1486 | return err; |
Will Deacon | 9ef2b48 | 2020-09-28 11:45:24 +0100 | [diff] [blame] | 1487 | } |
| 1488 | |
Will Deacon | b881cdc | 2020-11-13 11:38:44 +0000 | [diff] [blame] | 1489 | kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT); |
| 1490 | kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT); |
Will Deacon | 9ef2b48 | 2020-09-28 11:45:24 +0100 | [diff] [blame] | 1491 | return 0; |
| 1492 | } |
| 1493 | |
Quentin Perret | 9cc7758 | 2021-03-19 10:01:12 +0000 | [diff] [blame] | 1494 | static void cpu_prepare_hyp_mode(int cpu) |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1495 | { |
Quentin Perret | 9cc7758 | 2021-03-19 10:01:12 +0000 | [diff] [blame] | 1496 | struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); |
David Brazdil | d3e1086 | 2020-12-02 18:41:07 +0000 | [diff] [blame] | 1497 | unsigned long tcr; |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1498 | |
David Brazdil | 71b3ec5 | 2020-05-15 16:20:56 +0100 | [diff] [blame] | 1499 | /* |
| 1500 | * Calculate the raw per-cpu offset without a translation from the |
| 1501 | * kernel's mapping to the linear mapping, and store it in tpidr_el2 |
| 1502 | * so that we can use adr_l to access per-cpu variables in EL2. |
Steven Price | e166337 | 2021-01-08 16:12:54 +0000 | [diff] [blame] | 1503 | * Also drop the KASAN tag which gets in the way... |
David Brazdil | 71b3ec5 | 2020-05-15 16:20:56 +0100 | [diff] [blame] | 1504 | */ |
Quentin Perret | 9cc7758 | 2021-03-19 10:01:12 +0000 | [diff] [blame] | 1505 | params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) - |
David Brazdil | 63fec24 | 2020-12-02 18:41:06 +0000 | [diff] [blame] | 1506 | (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start)); |
David Brazdil | 71b3ec5 | 2020-05-15 16:20:56 +0100 | [diff] [blame] | 1507 | |
David Brazdil | d3e1086 | 2020-12-02 18:41:07 +0000 | [diff] [blame] | 1508 | params->mair_el2 = read_sysreg(mair_el1); |
| 1509 | |
| 1510 | /* |
| 1511 | * The ID map may be configured to use an extended virtual address |
| 1512 | * range. This is only the case if system RAM is out of range for the |
| 1513 | * currently configured page size and VA_BITS, in which case we will |
| 1514 | * also need the extended virtual range for the HYP ID map, or we won't |
| 1515 | * be able to enable the EL2 MMU. |
| 1516 | * |
| 1517 | * However, at EL2, there is only one TTBR register, and we can't switch |
| 1518 | * between translation tables *and* update TCR_EL2.T0SZ at the same |
| 1519 | * time. Bottom line: we need to use the extended range with *both* our |
| 1520 | * translation tables. |
| 1521 | * |
| 1522 | * So use the same T0SZ value we use for the ID map. |
| 1523 | */ |
| 1524 | tcr = (read_sysreg(tcr_el1) & TCR_EL2_MASK) | TCR_EL2_RES1; |
| 1525 | tcr &= ~TCR_T0SZ_MASK; |
| 1526 | tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET; |
| 1527 | params->tcr_el2 = tcr; |
| 1528 | |
Quentin Perret | 9cc7758 | 2021-03-19 10:01:12 +0000 | [diff] [blame] | 1529 | params->stack_hyp_va = kern_hyp_va(per_cpu(kvm_arm_hyp_stack_page, cpu) + PAGE_SIZE); |
David Brazdil | 63fec24 | 2020-12-02 18:41:06 +0000 | [diff] [blame] | 1530 | params->pgd_pa = kvm_mmu_get_httbr(); |
Quentin Perret | 734864c | 2021-03-19 10:01:29 +0000 | [diff] [blame] | 1531 | if (is_protected_kvm_enabled()) |
| 1532 | params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS; |
| 1533 | else |
| 1534 | params->hcr_el2 = HCR_HOST_NVHE_FLAGS; |
| 1535 | params->vttbr = params->vtcr = 0; |
David Brazdil | 63fec24 | 2020-12-02 18:41:06 +0000 | [diff] [blame] | 1536 | |
| 1537 | /* |
| 1538 | * Flush the init params from the data cache because the struct will |
| 1539 | * be read while the MMU is off. |
| 1540 | */ |
| 1541 | kvm_flush_dcache_to_poc(params, sizeof(*params)); |
Quentin Perret | 9cc7758 | 2021-03-19 10:01:12 +0000 | [diff] [blame] | 1542 | } |
| 1543 | |
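
The TCR setup above can be checked by hand: T0SZ = 64 - VA bits, so a 48-bit hyp VA space needs T0SZ = 16, while a 52-bit extended idmap forces T0SZ = 12 into both translation tables. A stand-alone sketch of the field update (constants restated locally; T0SZ occupies TCR_EL2[5:0] per the architecture):

    #include <stdint.h>

    #define T0SZ_SHIFT 0
    #define T0SZ_WIDTH 6 /* TCR_EL2.T0SZ is bits [5:0] */

    /* Program the input-size field for an N-bit VA space. */
    static uint64_t tcr_set_t0sz(uint64_t tcr, unsigned int va_bits)
    {
            uint64_t mask = ((1ULL << T0SZ_WIDTH) - 1) << T0SZ_SHIFT;

            return (tcr & ~mask) | (((uint64_t)(64 - va_bits)) << T0SZ_SHIFT);
    }

E.g. tcr_set_t0sz(tcr, 48) writes 16 into the field, matching idmap_t0sz for a default 48-bit configuration.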
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 1544 | static void hyp_install_host_vector(void) |
Quentin Perret | 9cc7758 | 2021-03-19 10:01:12 +0000 | [diff] [blame] | 1545 | { |
| 1546 | struct kvm_nvhe_init_params *params; |
| 1547 | struct arm_smccc_res res; |
| 1548 | |
| 1549 | /* Switch from the HYP stub to our own HYP init vector */ |
| 1550 | __hyp_set_vectors(kvm_get_idmap_vector()); |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1551 | |
David Brazdil | 71b3ec5 | 2020-05-15 16:20:56 +0100 | [diff] [blame] | 1552 | /* |
| 1553 | * Call initialization code, and switch to the full-blown HYP code.
| 1554 | * If the cpucaps haven't been finalized yet, something has gone very |
| 1555 | * wrong, and hyp will crash and burn when it uses any |
| 1556 | * cpus_have_const_cap() wrapper. |
| 1557 | */ |
| 1558 | BUG_ON(!system_capabilities_finalized()); |
Quentin Perret | 9cc7758 | 2021-03-19 10:01:12 +0000 | [diff] [blame] | 1559 | params = this_cpu_ptr_nvhe_sym(kvm_init_params); |
David Brazdil | 63fec24 | 2020-12-02 18:41:06 +0000 | [diff] [blame] | 1560 | arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res); |
Andrew Scull | 04e4caa | 2020-09-15 11:46:42 +0100 | [diff] [blame] | 1561 | WARN_ON(res.a0 != SMCCC_RET_SUCCESS); |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 1562 | } |
| 1563 | |
| 1564 | static void cpu_init_hyp_mode(void) |
| 1565 | { |
| 1566 | hyp_install_host_vector(); |
David Brazdil | 71b3ec5 | 2020-05-15 16:20:56 +0100 | [diff] [blame] | 1567 | |
| 1568 | /* |
| 1569 | * Disabling SSBD on a non-VHE system requires us to enable SSBS |
| 1570 | * at EL2. |
| 1571 | */ |
| 1572 | if (this_cpu_has_cap(ARM64_SSBS) && |
Marc Zyngier | d63d975 | 2020-09-18 14:08:54 +0100 | [diff] [blame] | 1573 | arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) { |
David Brazdil | 13aeb9b | 2020-06-25 14:14:16 +0100 | [diff] [blame] | 1574 | kvm_call_hyp_nvhe(__kvm_enable_ssbs); |
David Brazdil | 71b3ec5 | 2020-05-15 16:20:56 +0100 | [diff] [blame] | 1575 | } |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1576 | } |
| 1577 | |
Marc Zyngier | 47eb3cb | 2017-04-03 19:38:01 +0100 | [diff] [blame] | 1578 | static void cpu_hyp_reset(void) |
| 1579 | { |
| 1580 | if (!is_kernel_in_hyp_mode()) |
| 1581 | __hyp_reset_vectors(); |
| 1582 | } |
| 1583 | |
Will Deacon | 042c76a | 2020-11-13 11:38:40 +0000 | [diff] [blame] | 1584 | /* |
| 1585 | * EL2 vectors can be mapped and rerouted in a number of ways, |
| 1586 | * depending on the kernel configuration and the CPUs present:
| 1587 | * |
| 1588 | * - If the CPU is affected by Spectre-v2, the hardening sequence is |
| 1589 | * placed in one of the vector slots, which is executed before jumping |
| 1590 | * to the real vectors. |
| 1591 | * |
Will Deacon | c4792b6 | 2020-11-13 11:38:45 +0000 | [diff] [blame] | 1592 | * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot |
Will Deacon | 042c76a | 2020-11-13 11:38:40 +0000 | [diff] [blame] | 1593 | * containing the hardening sequence is mapped next to the idmap page, |
| 1594 | * and executed before jumping to the real vectors. |
| 1595 | * |
Will Deacon | c4792b6 | 2020-11-13 11:38:45 +0000 | [diff] [blame] | 1596 | * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an |
Will Deacon | 042c76a | 2020-11-13 11:38:40 +0000 | [diff] [blame] | 1597 | * empty slot is selected, mapped next to the idmap page, and |
| 1598 | * executed before jumping to the real vectors. |
| 1599 | * |
Will Deacon | c4792b6 | 2020-11-13 11:38:45 +0000 | [diff] [blame] | 1600 | * Note that ARM64_SPECTRE_V3A is somewhat incompatible with |
Will Deacon | 042c76a | 2020-11-13 11:38:40 +0000 | [diff] [blame] | 1601 | * VHE, as we don't have hypervisor-specific mappings. If the system |
| 1602 | * is VHE and yet selects this capability, it will be ignored. |
| 1603 | */ |
| 1604 | static void cpu_set_hyp_vector(void) |
| 1605 | { |
Will Deacon | 6279017 | 2020-11-13 11:38:42 +0000 | [diff] [blame] | 1606 | struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data); |
Will Deacon | b881cdc | 2020-11-13 11:38:44 +0000 | [diff] [blame] | 1607 | void *vector = hyp_spectre_vector_selector[data->slot]; |
Will Deacon | 042c76a | 2020-11-13 11:38:40 +0000 | [diff] [blame] | 1608 | |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 1609 | if (!is_protected_kvm_enabled()) |
| 1610 | *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector; |
| 1611 | else |
| 1612 | kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot); |
Will Deacon | 042c76a | 2020-11-13 11:38:40 +0000 | [diff] [blame] | 1613 | } |
| 1614 | |
Will Deacon | 8579a18 | 2021-10-08 14:58:36 +0100 | [diff] [blame] | 1615 | static void cpu_hyp_init_context(void) |
James Morse | 5f5560b | 2016-03-30 18:33:04 +0100 | [diff] [blame] | 1616 | { |
David Brazdil | 2a1198c | 2020-09-22 21:49:08 +0100 | [diff] [blame] | 1617 | kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt); |
Marc Zyngier | 1e0cf16 | 2019-07-05 23:35:56 +0100 | [diff] [blame] | 1618 | |
Will Deacon | 8579a18 | 2021-10-08 14:58:36 +0100 | [diff] [blame] | 1619 | if (!is_kernel_in_hyp_mode()) |
| 1620 | cpu_init_hyp_mode(); |
| 1621 | } |
| 1622 | |
| 1623 | static void cpu_hyp_init_features(void) |
| 1624 | { |
| 1625 | cpu_set_hyp_vector(); |
| 1626 | kvm_arm_init_debug(); |
Andrew Scull | a0e4795 | 2020-09-15 11:46:29 +0100 | [diff] [blame] | 1627 | |
Marc Zyngier | 9d47bb0 | 2018-10-01 13:41:32 +0100 | [diff] [blame] | 1628 | if (is_kernel_in_hyp_mode()) |
Hu Huajun | 02d50cd | 2017-06-12 22:37:48 +0800 | [diff] [blame] | 1629 | kvm_timer_init_vhe(); |
Christoffer Dall | 5b0d2cc | 2017-03-18 13:56:56 +0100 | [diff] [blame] | 1630 | |
| 1631 | if (vgic_present) |
| 1632 | kvm_vgic_init_cpu_hardware(); |
James Morse | 5f5560b | 2016-03-30 18:33:04 +0100 | [diff] [blame] | 1633 | } |
| 1634 | |
Will Deacon | 8579a18 | 2021-10-08 14:58:36 +0100 | [diff] [blame] | 1635 | static void cpu_hyp_reinit(void) |
| 1636 | { |
| 1637 | cpu_hyp_reset(); |
| 1638 | cpu_hyp_init_context(); |
| 1639 | cpu_hyp_init_features(); |
| 1640 | } |
| 1641 | |
AKASHI Takahiro | 67f6919 | 2016-04-27 17:47:05 +0100 | [diff] [blame] | 1642 | static void _kvm_arch_hardware_enable(void *discard) |
| 1643 | { |
| 1644 | if (!__this_cpu_read(kvm_arm_hardware_enabled)) { |
| 1645 | cpu_hyp_reinit(); |
| 1646 | __this_cpu_write(kvm_arm_hardware_enabled, 1); |
| 1647 | } |
| 1648 | } |
| 1649 | |
| 1650 | int kvm_arch_hardware_enable(void) |
| 1651 | { |
| 1652 | _kvm_arch_hardware_enable(NULL); |
| 1653 | return 0; |
| 1654 | } |
| 1655 | |
| 1656 | static void _kvm_arch_hardware_disable(void *discard) |
| 1657 | { |
| 1658 | if (__this_cpu_read(kvm_arm_hardware_enabled)) { |
| 1659 | cpu_hyp_reset(); |
| 1660 | __this_cpu_write(kvm_arm_hardware_enabled, 0); |
| 1661 | } |
| 1662 | } |
| 1663 | |
| 1664 | void kvm_arch_hardware_disable(void) |
| 1665 | { |
David Brazdil | fa8c3d6 | 2020-12-02 18:41:20 +0000 | [diff] [blame] | 1666 | if (!is_protected_kvm_enabled()) |
| 1667 | _kvm_arch_hardware_disable(NULL); |
AKASHI Takahiro | 67f6919 | 2016-04-27 17:47:05 +0100 | [diff] [blame] | 1668 | } |
Marc Zyngier | d157f4a | 2013-04-12 19:12:07 +0100 | [diff] [blame] | 1669 | |
Lorenzo Pieralisi | 1fcf7ce | 2013-08-05 15:04:46 +0100 | [diff] [blame] | 1670 | #ifdef CONFIG_CPU_PM |
| 1671 | static int hyp_init_cpu_pm_notifier(struct notifier_block *self, |
| 1672 | unsigned long cmd, |
| 1673 | void *v) |
| 1674 | { |
AKASHI Takahiro | 67f6919 | 2016-04-27 17:47:05 +0100 | [diff] [blame] | 1675 | /* |
| 1676 | * kvm_arm_hardware_enabled is left with its old value over |
| 1677 | * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should |
| 1678 | * re-enable hyp. |
| 1679 | */ |
| 1680 | switch (cmd) { |
| 1681 | case CPU_PM_ENTER: |
| 1682 | if (__this_cpu_read(kvm_arm_hardware_enabled)) |
| 1683 | /* |
| 1684 | * don't update kvm_arm_hardware_enabled here |
| 1685 | * so that the hardware will be re-enabled |
| 1686 | * when we resume. See below. |
| 1687 | */ |
| 1688 | cpu_hyp_reset(); |
Lorenzo Pieralisi | 1fcf7ce | 2013-08-05 15:04:46 +0100 | [diff] [blame] | 1689 | |
AKASHI Takahiro | 67f6919 | 2016-04-27 17:47:05 +0100 | [diff] [blame] | 1690 | return NOTIFY_OK; |
James Morse | 58d6b15 | 2018-01-22 18:19:06 +0000 | [diff] [blame] | 1691 | case CPU_PM_ENTER_FAILED: |
AKASHI Takahiro | 67f6919 | 2016-04-27 17:47:05 +0100 | [diff] [blame] | 1692 | case CPU_PM_EXIT: |
| 1693 | if (__this_cpu_read(kvm_arm_hardware_enabled)) |
| 1694 | /* The hardware was enabled before suspend. */ |
| 1695 | cpu_hyp_reinit(); |
| 1696 | |
| 1697 | return NOTIFY_OK; |
| 1698 | |
| 1699 | default: |
| 1700 | return NOTIFY_DONE; |
| 1701 | } |
Lorenzo Pieralisi | 1fcf7ce | 2013-08-05 15:04:46 +0100 | [diff] [blame] | 1702 | } |
| 1703 | |
| 1704 | static struct notifier_block hyp_init_cpu_pm_nb = { |
| 1705 | .notifier_call = hyp_init_cpu_pm_notifier, |
| 1706 | }; |
| 1707 | |
Marc Zyngier | 44362a3 | 2020-12-23 12:08:54 +0000 | [diff] [blame] | 1708 | static void hyp_cpu_pm_init(void) |
Lorenzo Pieralisi | 1fcf7ce | 2013-08-05 15:04:46 +0100 | [diff] [blame] | 1709 | { |
David Brazdil | fa8c3d6 | 2020-12-02 18:41:20 +0000 | [diff] [blame] | 1710 | if (!is_protected_kvm_enabled()) |
| 1711 | cpu_pm_register_notifier(&hyp_init_cpu_pm_nb); |
Lorenzo Pieralisi | 1fcf7ce | 2013-08-05 15:04:46 +0100 | [diff] [blame] | 1712 | } |
Marc Zyngier | 44362a3 | 2020-12-23 12:08:54 +0000 | [diff] [blame] | 1713 | static void hyp_cpu_pm_exit(void) |
Sudeep Holla | 06a71a2 | 2016-04-04 14:46:51 +0100 | [diff] [blame] | 1714 | { |
David Brazdil | fa8c3d6 | 2020-12-02 18:41:20 +0000 | [diff] [blame] | 1715 | if (!is_protected_kvm_enabled()) |
| 1716 | cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb); |
Sudeep Holla | 06a71a2 | 2016-04-04 14:46:51 +0100 | [diff] [blame] | 1717 | } |
Lorenzo Pieralisi | 1fcf7ce | 2013-08-05 15:04:46 +0100 | [diff] [blame] | 1718 | #else |
| 1719 | static inline void hyp_cpu_pm_init(void) |
| 1720 | { |
| 1721 | } |
Sudeep Holla | 06a71a2 | 2016-04-04 14:46:51 +0100 | [diff] [blame] | 1722 | static inline void hyp_cpu_pm_exit(void) |
| 1723 | { |
| 1724 | } |
Lorenzo Pieralisi | 1fcf7ce | 2013-08-05 15:04:46 +0100 | [diff] [blame] | 1725 | #endif |
| 1726 | |
David Brazdil | 94f5e8a | 2020-12-02 18:41:10 +0000 | [diff] [blame] | 1727 | static void init_cpu_logical_map(void) |
| 1728 | { |
| 1729 | unsigned int cpu; |
| 1730 | |
| 1731 | /* |
| 1732 | * Copy the MPIDR <-> logical CPU ID mapping to hyp. |
| 1733 | * Only copy the set of online CPUs whose features have been checked
| 1734 | * against the finalized system capabilities. The hypervisor will not |
| 1735 | * allow any other CPUs from the `possible` set to boot. |
| 1736 | */ |
| 1737 | for_each_online_cpu(cpu) |
David Brazdil | 61fe0c3 | 2020-12-08 14:24:50 +0000 | [diff] [blame] | 1738 | hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu); |
David Brazdil | 94f5e8a | 2020-12-02 18:41:10 +0000 | [diff] [blame] | 1739 | } |
| 1740 | |
Marc Zyngier | 767c973 | 2020-12-22 12:46:41 +0000 | [diff] [blame] | 1741 | #define init_psci_0_1_impl_state(config, what) \ |
| 1742 | config.psci_0_1_ ## what ## _implemented = psci_ops.what |
| 1743 | |
David Brazdil | eeeee71 | 2020-12-02 18:41:12 +0000 | [diff] [blame] | 1744 | static bool init_psci_relay(void) |
| 1745 | { |
| 1746 | /* |
| 1747 | * If PSCI has not been initialized, protected KVM cannot install |
| 1748 | * itself on newly booted CPUs. |
| 1749 | */ |
| 1750 | if (!psci_ops.get_version) { |
| 1751 | kvm_err("Cannot initialize protected mode without PSCI\n"); |
| 1752 | return false; |
| 1753 | } |
| 1754 | |
David Brazdil | ff367fe | 2020-12-08 14:24:47 +0000 | [diff] [blame] | 1755 | kvm_host_psci_config.version = psci_ops.get_version(); |
| 1756 | |
| 1757 | if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) { |
| 1758 | kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids(); |
Marc Zyngier | 767c973 | 2020-12-22 12:46:41 +0000 | [diff] [blame] | 1759 | init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend); |
| 1760 | init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on); |
| 1761 | init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off); |
| 1762 | init_psci_0_1_impl_state(kvm_host_psci_config, migrate); |
David Brazdil | ff367fe | 2020-12-08 14:24:47 +0000 | [diff] [blame] | 1763 | } |
David Brazdil | eeeee71 | 2020-12-02 18:41:12 +0000 | [diff] [blame] | 1764 | return true; |
| 1765 | } |
| 1766 | |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1767 | static int init_subsystems(void) |
| 1768 | { |
AKASHI Takahiro | 67f6919 | 2016-04-27 17:47:05 +0100 | [diff] [blame] | 1769 | int err = 0; |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1770 | |
| 1771 | /* |
AKASHI Takahiro | 67f6919 | 2016-04-27 17:47:05 +0100 | [diff] [blame] | 1772 | * Enable hardware so that subsystem initialisation can access EL2. |
James Morse | 5f5560b | 2016-03-30 18:33:04 +0100 | [diff] [blame] | 1773 | */ |
AKASHI Takahiro | 67f6919 | 2016-04-27 17:47:05 +0100 | [diff] [blame] | 1774 | on_each_cpu(_kvm_arch_hardware_enable, NULL, 1); |
James Morse | 5f5560b | 2016-03-30 18:33:04 +0100 | [diff] [blame] | 1775 | |
| 1776 | /* |
| 1777 | * Register CPU low-power notifier
| 1778 | */ |
| 1779 | hyp_cpu_pm_init(); |
| 1780 | |
| 1781 | /* |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1782 | * Init HYP view of VGIC |
| 1783 | */ |
| 1784 | err = kvm_vgic_hyp_init(); |
| 1785 | switch (err) { |
| 1786 | case 0: |
| 1787 | vgic_present = true; |
| 1788 | break; |
| 1789 | case -ENODEV: |
| 1790 | case -ENXIO: |
| 1791 | vgic_present = false; |
AKASHI Takahiro | 67f6919 | 2016-04-27 17:47:05 +0100 | [diff] [blame] | 1792 | err = 0; |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1793 | break; |
| 1794 | default: |
AKASHI Takahiro | 67f6919 | 2016-04-27 17:47:05 +0100 | [diff] [blame] | 1795 | goto out; |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1796 | } |
| 1797 | |
| 1798 | /* |
| 1799 | * Init HYP architected timer support |
| 1800 | */ |
Marc Zyngier | f384dcf | 2017-12-07 11:46:15 +0000 | [diff] [blame] | 1801 | err = kvm_timer_hyp_init(vgic_present); |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1802 | if (err) |
AKASHI Takahiro | 67f6919 | 2016-04-27 17:47:05 +0100 | [diff] [blame] | 1803 | goto out; |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1804 | |
Sean Christopherson | 17ed14e | 2021-11-11 02:07:37 +0000 | [diff] [blame] | 1805 | kvm_register_perf_callbacks(NULL); |
| 1806 | |
Marc Zyngier | 6ac4a5a | 2020-11-02 18:11:16 +0000 | [diff] [blame] | 1807 | kvm_sys_reg_table_init(); |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1808 | |
AKASHI Takahiro | 67f6919 | 2016-04-27 17:47:05 +0100 | [diff] [blame] | 1809 | out: |
David Brazdil | fa8c3d6 | 2020-12-02 18:41:20 +0000 | [diff] [blame] | 1810 | if (err || !is_protected_kvm_enabled()) |
| 1811 | on_each_cpu(_kvm_arch_hardware_disable, NULL, 1); |
AKASHI Takahiro | 67f6919 | 2016-04-27 17:47:05 +0100 | [diff] [blame] | 1812 | |
| 1813 | return err; |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1814 | } |
| 1815 | |
| 1816 | static void teardown_hyp_mode(void) |
| 1817 | { |
| 1818 | int cpu; |
| 1819 | |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1820 | free_hyp_pgds(); |
David Brazdil | 30c9539 | 2020-09-22 21:49:09 +0100 | [diff] [blame] | 1821 | for_each_possible_cpu(cpu) { |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1822 | free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); |
David Brazdil | 30c9539 | 2020-09-22 21:49:09 +0100 | [diff] [blame] | 1823 | free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order()); |
| 1824 | } |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1825 | } |
| 1826 | |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 1827 | static int do_pkvm_init(u32 hyp_va_bits) |
| 1828 | { |
| 1829 | void *per_cpu_base = kvm_ksym_ref(kvm_arm_hyp_percpu_base); |
| 1830 | int ret; |
| 1831 | |
| 1832 | preempt_disable(); |
Will Deacon | 8579a18 | 2021-10-08 14:58:36 +0100 | [diff] [blame] | 1833 | cpu_hyp_init_context(); |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 1834 | ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size, |
| 1835 | num_possible_cpus(), kern_hyp_va(per_cpu_base), |
| 1836 | hyp_va_bits); |
Will Deacon | 8579a18 | 2021-10-08 14:58:36 +0100 | [diff] [blame] | 1837 | cpu_hyp_init_features(); |
| 1838 | |
| 1839 | /* |
| 1840 | * The stub hypercalls are now disabled, so set our local flag to |
| 1841 | * prevent a later re-init attempt in kvm_arch_hardware_enable(). |
| 1842 | */ |
| 1843 | __this_cpu_write(kvm_arm_hardware_enabled, 1); |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 1844 | preempt_enable(); |
| 1845 | |
| 1846 | return ret; |
| 1847 | } |
| 1848 | |
| 1849 | static int kvm_hyp_init_protection(u32 hyp_va_bits) |
| 1850 | { |
| 1851 | void *addr = phys_to_virt(hyp_mem_base); |
| 1852 | int ret; |
| 1853 | |
Fuad Tabba | 6c30bfb | 2021-10-10 15:56:32 +0100 | [diff] [blame] | 1854 | kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); |
| 1855 | kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1); |
| 1856 | kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1); |
| 1857 | kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1); |
Marc Zyngier | 7c41993 | 2021-03-22 13:32:34 +0000 | [diff] [blame] | 1858 | kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); |
| 1859 | kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); |
Fuad Tabba | 6c30bfb | 2021-10-10 15:56:32 +0100 | [diff] [blame] | 1860 | kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1); |
Marc Zyngier | 7c41993 | 2021-03-22 13:32:34 +0000 | [diff] [blame] | 1861 | |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 1862 | ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP); |
| 1863 | if (ret) |
| 1864 | return ret; |
| 1865 | |
| 1866 | ret = do_pkvm_init(hyp_va_bits); |
| 1867 | if (ret) |
| 1868 | return ret; |
| 1869 | |
| 1870 | free_hyp_pgds(); |
| 1871 | |
| 1872 | return 0; |
| 1873 | } |
| 1874 | |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1875 | /** |
| 1876 | * Initializes Hyp-mode on all online CPUs
| 1877 | */ |
| 1878 | static int init_hyp_mode(void) |
| 1879 | { |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 1880 | u32 hyp_va_bits; |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1881 | int cpu; |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 1882 | int err = -ENOMEM; |
| 1883 | |
| 1884 | /* |
| 1885 | * The protected Hyp-mode cannot be initialized if the memory pool |
| 1886 | * allocation has failed. |
| 1887 | */ |
| 1888 | if (is_protected_kvm_enabled() && !hyp_mem_base) |
| 1889 | goto out_err; |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1890 | |
| 1891 | /* |
| 1892 | * Allocate Hyp PGD and setup Hyp identity mapping |
| 1893 | */ |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 1894 | err = kvm_mmu_init(&hyp_va_bits); |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1895 | if (err) |
| 1896 | goto out_err; |
| 1897 | |
| 1898 | /* |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1899 | * Allocate stack pages for Hypervisor-mode |
| 1900 | */ |
| 1901 | for_each_possible_cpu(cpu) { |
| 1902 | unsigned long stack_page; |
| 1903 | |
| 1904 | stack_page = __get_free_page(GFP_KERNEL); |
| 1905 | if (!stack_page) { |
| 1906 | err = -ENOMEM; |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1907 | goto out_err; |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1908 | } |
| 1909 | |
| 1910 | per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; |
| 1911 | } |
| 1912 | |
| 1913 | /* |
David Brazdil | 30c9539 | 2020-09-22 21:49:09 +0100 | [diff] [blame] | 1914 | * Allocate and initialize pages for Hypervisor-mode percpu regions. |
| 1915 | */ |
| 1916 | for_each_possible_cpu(cpu) { |
| 1917 | struct page *page; |
| 1918 | void *page_addr; |
| 1919 | |
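| | 		/* Give each CPU a private, writable copy of the hyp percpu section, seeded from the kernel image. */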
| 1920 | page = alloc_pages(GFP_KERNEL, nvhe_percpu_order()); |
| 1921 | if (!page) { |
| 1922 | err = -ENOMEM; |
| 1923 | goto out_err; |
| 1924 | } |
| 1925 | |
| 1926 | page_addr = page_address(page); |
| 1927 | memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size()); |
| 1928 | kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr; |
| 1929 | } |
| 1930 | |
| 1931 | /* |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1932 | * Map the Hyp-code called directly from the host |
| 1933 | */ |
Linus Torvalds | 588ab3f | 2016-03-17 20:03:47 -0700 | [diff] [blame] | 1934 | err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start), |
Marc Zyngier | 5900270 | 2016-06-13 15:00:48 +0100 | [diff] [blame] | 1935 | kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC); |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1936 | if (err) { |
| 1937 | kvm_err("Cannot map world-switch code\n"); |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1938 | goto out_err; |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1939 | } |
| 1940 | |
David Brazdil | 16174ee | 2021-01-05 18:05:35 +0000 | [diff] [blame] | 1941 | err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start), |
| 1942 | kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO); |
David Brazdil | 2d7bf21 | 2020-12-02 18:41:08 +0000 | [diff] [blame] | 1943 | if (err) { |
David Brazdil | 16174ee | 2021-01-05 18:05:35 +0000 | [diff] [blame] | 1944 | kvm_err("Cannot map .hyp.rodata section\n"); |
David Brazdil | 2d7bf21 | 2020-12-02 18:41:08 +0000 | [diff] [blame] | 1945 | goto out_err; |
| 1946 | } |
| 1947 | |
Ard Biesheuvel | a0bf977 | 2016-02-16 13:52:39 +0100 | [diff] [blame] | 1948 | err = create_hyp_mappings(kvm_ksym_ref(__start_rodata), |
Marc Zyngier | 74a6b88 | 2016-06-13 15:00:47 +0100 | [diff] [blame] | 1949 | kvm_ksym_ref(__end_rodata), PAGE_HYP_RO); |
Marc Zyngier | 910917b | 2015-10-27 12:18:48 +0000 | [diff] [blame] | 1950 | if (err) { |
| 1951 | kvm_err("Cannot map rodata section\n"); |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1952 | goto out_err; |
Marc Zyngier | 910917b | 2015-10-27 12:18:48 +0000 | [diff] [blame] | 1953 | } |
| 1954 | |
Quentin Perret | 380e18a | 2021-03-19 10:01:15 +0000 | [diff] [blame] | 1955 | /* |
| 1956 | * .hyp.bss is guaranteed to be placed at the beginning of the .bss |
| 1957 | * section thanks to an assertion in the linker script. Map it RW and |
| 1958 | * the rest of .bss RO. |
| 1959 | */ |
| 1960 | err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start), |
| 1961 | kvm_ksym_ref(__hyp_bss_end), PAGE_HYP); |
| 1962 | if (err) { |
| 1963 | kvm_err("Cannot map hyp bss section: %d\n", err); |
| 1964 | goto out_err; |
| 1965 | } |
| 1966 | |
| 1967 | err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end), |
Marc Zyngier | c8ea039 | 2016-10-20 10:17:21 +0100 | [diff] [blame] | 1968 | kvm_ksym_ref(__bss_stop), PAGE_HYP_RO); |
| 1969 | if (err) { |
| 1970 | kvm_err("Cannot map bss section\n"); |
| 1971 | goto out_err; |
| 1972 | } |
| 1973 | |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1974 | /* |
| 1975 | * Map the Hyp stack pages |
| 1976 | */ |
| 1977 | for_each_possible_cpu(cpu) { |
| 1978 | char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); |
Marc Zyngier | c8dddec | 2016-06-13 15:00:45 +0100 | [diff] [blame] | 1979 | err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE, |
| 1980 | PAGE_HYP); |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1981 | |
| 1982 | if (err) { |
| 1983 | kvm_err("Cannot map hyp stack\n"); |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 1984 | goto out_err; |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1985 | } |
| 1986 | } |
| 1987 | |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1988 | for_each_possible_cpu(cpu) { |
David Brazdil | 30c9539 | 2020-09-22 21:49:09 +0100 | [diff] [blame] | 1989 | char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu]; |
| 1990 | char *percpu_end = percpu_begin + nvhe_percpu_size(); |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1991 | |
Quentin Perret | 9cc7758 | 2021-03-19 10:01:12 +0000 | [diff] [blame] | 1992 | /* Map Hyp percpu pages */ |
David Brazdil | 30c9539 | 2020-09-22 21:49:09 +0100 | [diff] [blame] | 1993 | err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP); |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1994 | if (err) { |
David Brazdil | 30c9539 | 2020-09-22 21:49:09 +0100 | [diff] [blame] | 1995 | kvm_err("Cannot map hyp percpu region\n"); |
Andrew Scull | 6e3bfbb | 2020-09-15 11:46:30 +0100 | [diff] [blame] | 1996 | goto out_err; |
| 1997 | } |
Quentin Perret | 9cc7758 | 2021-03-19 10:01:12 +0000 | [diff] [blame] | 1998 | |
| 1999 | /* Prepare the CPU initialization parameters */ |
| 2000 | cpu_prepare_hyp_mode(cpu); |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2001 | } |
| 2002 | |
David Brazdil | eeeee71 | 2020-12-02 18:41:12 +0000 | [diff] [blame] | 2003 | if (is_protected_kvm_enabled()) { |
David Brazdil | 94f5e8a | 2020-12-02 18:41:10 +0000 | [diff] [blame] | 2004 | init_cpu_logical_map(); |
| 2005 | |
Wang Wensheng | 52b9e26 | 2021-04-06 12:17:59 +0000 | [diff] [blame] | 2006 | if (!init_psci_relay()) { |
| 2007 | err = -ENODEV; |
David Brazdil | eeeee71 | 2020-12-02 18:41:12 +0000 | [diff] [blame] | 2008 | goto out_err; |
Wang Wensheng | 52b9e26 | 2021-04-06 12:17:59 +0000 | [diff] [blame] | 2009 | } |
David Brazdil | eeeee71 | 2020-12-02 18:41:12 +0000 | [diff] [blame] | 2010 | } |
| 2011 | |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 2012 | if (is_protected_kvm_enabled()) { |
| 2013 | err = kvm_hyp_init_protection(hyp_va_bits); |
| 2014 | if (err) { |
| 2015 | kvm_err("Failed to init hyp memory protection\n"); |
| 2016 | goto out_err; |
| 2017 | } |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2018 | } |
| 2019 | |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2020 | return 0; |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 2021 | |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2022 | out_err: |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 2023 | teardown_hyp_mode(); |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2024 | kvm_err("error initializing Hyp mode: %d\n", err); |
| 2025 | return err; |
| 2026 | } |
| 2027 | |
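| | /*
| |  * Runs on every CPU via on_each_cpu(): each CPU installs the host stage-2
| |  * for itself with the __pkvm_prot_finalize hypercall, reporting any
| |  * failure through the shared error cookie.
| |  */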
Will Deacon | 2f2e1a5 | 2021-10-08 14:58:37 +0100 | [diff] [blame] | 2028 | static void _kvm_host_prot_finalize(void *arg) |
Quentin Perret | 1025c8c | 2021-03-19 10:01:43 +0000 | [diff] [blame] | 2029 | { |
Will Deacon | 2f2e1a5 | 2021-10-08 14:58:37 +0100 | [diff] [blame] | 2030 | int *err = arg; |
| 2031 | |
| 2032 | if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize))) |
| 2033 | WRITE_ONCE(*err, -EINVAL); |
| 2034 | } |
| 2035 | |
| 2036 | static int pkvm_drop_host_privileges(void) |
| 2037 | { |
| 2038 | int ret = 0; |
| 2039 | |
| 2040 | /* |
| 2041 | * Flip the static key upfront as that may no longer be possible |
| 2042 | * once the host stage 2 is installed. |
| 2043 | */ |
| 2044 | static_branch_enable(&kvm_protected_mode_initialized); |
| 2045 | on_each_cpu(_kvm_host_prot_finalize, &ret, 1); |
| 2046 | return ret; |
Quentin Perret | 1025c8c | 2021-03-19 10:01:43 +0000 | [diff] [blame] | 2047 | } |
| 2048 | |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 2049 | static int finalize_hyp_mode(void) |
| 2050 | { |
| 2051 | if (!is_protected_kvm_enabled()) |
| 2052 | return 0; |
| 2053 | |
Marc Zyngier | 47e6223 | 2021-08-02 13:38:30 +0100 | [diff] [blame] | 2054 | /* |
| 2055 | * Exclude HYP BSS from kmemleak so that it doesn't get peeked |
| 2056 | * at, which would end badly once the section is inaccessible. |
| 2057 | * None of the other sections should ever be introspected.
| 2058 | */ |
| 2059 | kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start); |
Will Deacon | 2f2e1a5 | 2021-10-08 14:58:37 +0100 | [diff] [blame] | 2060 | return pkvm_drop_host_privileges(); |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 2061 | } |
| 2062 | |
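| | /*
| |  * Slow-path lookup used e.g. by PSCI CPU_ON, where the guest names a
| |  * target core by MPIDR affinity; only the Aff* fields are compared.
| |  * Example: a target affinity of 0x0003 resolves to the vcpu whose
| |  * MPIDR_EL1 affinity fields read 0.0.0.3.
| |  */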
Andre Przywara | 4429fc6 | 2014-06-02 15:37:13 +0200 | [diff] [blame] | 2063 | struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) |
| 2064 | { |
| 2065 | struct kvm_vcpu *vcpu; |
Marc Zyngier | 46808a4 | 2021-11-16 16:04:02 +0000 | [diff] [blame] | 2066 | unsigned long i; |
Andre Przywara | 4429fc6 | 2014-06-02 15:37:13 +0200 | [diff] [blame] | 2067 | |
| 2068 | mpidr &= MPIDR_HWID_BITMASK; |
| 2069 | kvm_for_each_vcpu(i, vcpu, kvm) { |
| 2070 | if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu)) |
| 2071 | return vcpu; |
| 2072 | } |
| 2073 | return NULL; |
| 2074 | } |
| 2075 | |
Eric Auger | 2412405 | 2017-10-27 15:28:31 +0100 | [diff] [blame] | 2076 | bool kvm_arch_has_irq_bypass(void) |
| 2077 | { |
| 2078 | return true; |
| 2079 | } |
| 2080 | |
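| | /*
| |  * With a GICv4 ITS, an MSI-backed irqfd can be forwarded straight to the
| |  * guest as a vLPI; without direct-injection support, the call below is a
| |  * no-op and delivery falls back to the software path.
| |  */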
| 2081 | int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, |
| 2082 | struct irq_bypass_producer *prod) |
| 2083 | { |
| 2084 | struct kvm_kernel_irqfd *irqfd = |
| 2085 | container_of(cons, struct kvm_kernel_irqfd, consumer); |
| 2086 | |
Marc Zyngier | 196b136 | 2017-10-27 15:28:39 +0100 | [diff] [blame] | 2087 | return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq, |
| 2088 | &irqfd->irq_entry); |
Eric Auger | 2412405 | 2017-10-27 15:28:31 +0100 | [diff] [blame] | 2089 | }
| |
| 2090 | void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, |
| 2091 | struct irq_bypass_producer *prod) |
| 2092 | { |
| 2093 | struct kvm_kernel_irqfd *irqfd = |
| 2094 | container_of(cons, struct kvm_kernel_irqfd, consumer); |
| 2095 | |
Marc Zyngier | 196b136 | 2017-10-27 15:28:39 +0100 | [diff] [blame] | 2096 | kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq, |
| 2097 | &irqfd->irq_entry); |
Eric Auger | 2412405 | 2017-10-27 15:28:31 +0100 | [diff] [blame] | 2098 | } |
| 2099 | |
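| | /*
| |  * The irq_bypass core brackets forwarding updates with stop/start, so all
| |  * vcpus are parked while the vgic forwarding state changes under them.
| |  */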
| 2100 | void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons) |
| 2101 | { |
| 2102 | struct kvm_kernel_irqfd *irqfd = |
| 2103 | container_of(cons, struct kvm_kernel_irqfd, consumer); |
| 2104 | |
| 2105 | kvm_arm_halt_guest(irqfd->kvm); |
| 2106 | } |
| 2107 | |
| 2108 | void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons) |
| 2109 | { |
| 2110 | struct kvm_kernel_irqfd *irqfd = |
| 2111 | container_of(cons, struct kvm_kernel_irqfd, consumer); |
| 2112 | |
| 2113 | kvm_arm_resume_guest(irqfd->kvm); |
| 2114 | } |
| 2115 | |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2116 | /**
| 2117 |  * kvm_arch_init - Initialize Hyp-mode and memory mappings on all CPUs
| 2118 |  * @opaque: unused by the arm64 implementation
| |  */
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2119 | int kvm_arch_init(void *opaque) |
| 2120 | { |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2121 | int err; |
Julien Thierry | fe7d7b0 | 2017-10-20 12:34:16 +0100 | [diff] [blame] | 2122 | bool in_hyp_mode; |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2123 | |
| 2124 | if (!is_hyp_mode_available()) { |
Ard Biesheuvel | 58d0d19 | 2017-11-28 15:18:19 +0000 | [diff] [blame] | 2125 | kvm_info("HYP mode not available\n"); |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2126 | return -ENODEV; |
| 2127 | } |
| 2128 | |
Marc Zyngier | b6a68b9 | 2021-10-01 18:05:53 +0100 | [diff] [blame] | 2129 | if (kvm_get_mode() == KVM_MODE_NONE) { |
| 2130 | kvm_info("KVM disabled from command line\n"); |
| 2131 | return -ENODEV; |
| 2132 | } |
| 2133 | |
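| | 	/* With VHE the kernel already runs at EL2, so no separate Hyp-mode setup is needed. */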
Marc Zyngier | 33e5f4e | 2018-12-06 17:31:20 +0000 | [diff] [blame] | 2134 | in_hyp_mode = is_kernel_in_hyp_mode(); |
| 2135 | |
Rob Herring | 96d389ca | 2020-10-28 13:28:39 -0500 | [diff] [blame] | 2136 | if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) || |
| 2137 | cpus_have_final_cap(ARM64_WORKAROUND_1508412)) |
Rob Herring | abf532c | 2020-08-03 13:31:25 -0600 | [diff] [blame] | 2138 | kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \ |
| 2139 | "Only trusted guests should be used on this system.\n"); |
| 2140 | |
Anshuman Khandual | bf249d9 | 2021-08-12 10:39:52 +0530 | [diff] [blame] | 2141 | err = kvm_set_ipa_limit(); |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 2142 | if (err) |
| 2143 | return err; |
Srivatsa S. Bhat | 8146875 | 2014-03-18 15:53:05 +0530 | [diff] [blame] | 2144 | |
Dave Martin | a3be836 | 2019-04-12 15:30:58 +0100 | [diff] [blame] | 2145 | err = kvm_arm_init_sve(); |
Dave Martin | 0f062bf | 2019-02-28 18:33:00 +0000 | [diff] [blame] | 2146 | if (err) |
| 2147 | return err; |
| 2148 | |
Julien Thierry | fe7d7b0 | 2017-10-20 12:34:16 +0100 | [diff] [blame] | 2149 | if (!in_hyp_mode) { |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 2150 | err = init_hyp_mode(); |
Julien Thierry | fe7d7b0 | 2017-10-20 12:34:16 +0100 | [diff] [blame] | 2151 | if (err) |
| 2152 | goto out_err; |
| 2153 | } |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2154 | |
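| | 	/*
| | 	 * Populate the EL2 exception vector slots (direct and Spectre-hardened
| | 	 * variants) that individual CPUs pick from at run time.
| | 	 */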
Will Deacon | b881cdc | 2020-11-13 11:38:44 +0000 | [diff] [blame] | 2155 | err = kvm_init_vector_slots(); |
| 2156 | if (err) { |
| 2157 | kvm_err("Cannot initialise vector slots\n"); |
| 2158 | goto out_err; |
| 2159 | } |
| 2160 | |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 2161 | err = init_subsystems(); |
| 2162 | if (err) |
| 2163 | goto out_hyp; |
Marc Zyngier | d157f4a | 2013-04-12 19:12:07 +0100 | [diff] [blame] | 2164 | |
Quentin Perret | bfa79a8 | 2021-03-19 10:01:26 +0000 | [diff] [blame] | 2165 | if (!in_hyp_mode) { |
| 2166 | err = finalize_hyp_mode(); |
| 2167 | if (err) { |
| 2168 | kvm_err("Failed to finalize Hyp protection\n"); |
| 2169 | goto out_hyp; |
| 2170 | } |
| 2171 | } |
| 2172 | |
David Brazdil | f19f664 | 2020-12-02 18:41:22 +0000 | [diff] [blame] | 2173 | if (is_protected_kvm_enabled()) { |
David Brazdil | 3eb681f | 2020-12-02 18:40:58 +0000 | [diff] [blame] | 2174 | kvm_info("Protected nVHE mode initialized successfully\n"); |
David Brazdil | f19f664 | 2020-12-02 18:41:22 +0000 | [diff] [blame] | 2175 | } else if (in_hyp_mode) { |
Julien Thierry | fe7d7b0 | 2017-10-20 12:34:16 +0100 | [diff] [blame] | 2176 | kvm_info("VHE mode initialized successfully\n"); |
David Brazdil | f19f664 | 2020-12-02 18:41:22 +0000 | [diff] [blame] | 2177 | } else { |
Julien Thierry | fe7d7b0 | 2017-10-20 12:34:16 +0100 | [diff] [blame] | 2178 | kvm_info("Hyp mode initialized successfully\n"); |
David Brazdil | f19f664 | 2020-12-02 18:41:22 +0000 | [diff] [blame] | 2179 | } |
Julien Thierry | fe7d7b0 | 2017-10-20 12:34:16 +0100 | [diff] [blame] | 2180 | |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2181 | return 0; |
Marc Zyngier | 1e947ba | 2015-01-29 11:59:54 +0000 | [diff] [blame] | 2182 | |
| 2183 | out_hyp: |
Shannon Zhao | c3e3540 | 2019-12-02 15:42:11 +0800 | [diff] [blame] | 2184 | hyp_cpu_pm_exit(); |
Julien Thierry | fe7d7b0 | 2017-10-20 12:34:16 +0100 | [diff] [blame] | 2185 | if (!in_hyp_mode) |
| 2186 | teardown_hyp_mode(); |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2187 | out_err: |
| 2188 | return err; |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2189 | } |
| 2190 | |
| 2191 | /* Compiling as a module is not supported */
| 2192 | void kvm_arch_exit(void) |
| 2193 | { |
Sean Christopherson | 17ed14e | 2021-11-11 02:07:37 +0000 | [diff] [blame] | 2194 | kvm_unregister_perf_callbacks(); |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2195 | } |
| 2196 | |
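| | /*
| |  * Parse the "kvm-arm.mode" kernel command-line option; for example,
| |  * booting with "kvm-arm.mode=protected" selects pKVM, while
| |  * "kvm-arm.mode=none" keeps KVM from initializing at all.
| |  */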
David Brazdil | d8b369c | 2020-12-02 18:40:57 +0000 | [diff] [blame] | 2197 | static int __init early_kvm_mode_cfg(char *arg) |
| 2198 | { |
| 2199 | if (!arg) |
| 2200 | return -EINVAL; |
| 2201 | |
| 2202 | if (strcmp(arg, "protected") == 0) { |
| 2203 | kvm_mode = KVM_MODE_PROTECTED; |
| 2204 | return 0; |
| 2205 | } |
| 2206 | |
Marc Zyngier | b6a68b9 | 2021-10-01 18:05:53 +0100 | [diff] [blame] | 2207 | if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) { |
| 2208 | kvm_mode = KVM_MODE_DEFAULT; |
Marc Zyngier | 1945a06 | 2021-02-08 09:57:26 +0000 | [diff] [blame] | 2209 | return 0; |
Marc Zyngier | b6a68b9 | 2021-10-01 18:05:53 +0100 | [diff] [blame] | 2210 | } |
| 2211 | |
| 2212 | if (strcmp(arg, "none") == 0) { |
| 2213 | kvm_mode = KVM_MODE_NONE; |
| 2214 | return 0; |
| 2215 | } |
Marc Zyngier | 1945a06 | 2021-02-08 09:57:26 +0000 | [diff] [blame] | 2216 | |
David Brazdil | d8b369c | 2020-12-02 18:40:57 +0000 | [diff] [blame] | 2217 | return -EINVAL; |
| 2218 | } |
| 2219 | early_param("kvm-arm.mode", early_kvm_mode_cfg); |
| 2220 | |
David Brazdil | 3eb681f | 2020-12-02 18:40:58 +0000 | [diff] [blame] | 2221 | enum kvm_mode kvm_get_mode(void) |
| 2222 | { |
| 2223 | return kvm_mode; |
| 2224 | } |
| 2225 | |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2226 | static int arm_init(void)
| 2227 | {
| 2228 | 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
| 2229 | }
| 2231 | |
| 2232 | module_init(arm_init); |