/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/smp_plat.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 512
#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)

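/*
 * Usage sketch (illustrative, using the generic vcpu-request helpers
 * from <linux/kvm_host.h>): a request is raised and the vcpu kicked
 * out of the guest, then consumed from the vcpu's run loop:
 *
 *	kvm_make_request(KVM_REQ_SLEEP, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
 *		...put the vcpu on its waitqueue...
 */
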
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;
};

struct kvm_arch {
	struct kvm_vmid vmid;

	/* stage2 entry level table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;
};

#define KVM_NR_MEM_OBJS     40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
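
/*
 * Minimal sketch of the intended usage (helper names here are
 * illustrative; the real helpers live in the KVM MMU code): the cache
 * is topped up before taking the lock, so the locked fault path can
 * pop preallocated pages without ever calling the allocator.
 *
 *	int example_topup(struct kvm_mmu_memory_cache *mc, int min)
 *	{
 *		while (mc->nobjs < min) {
 *			void *page = (void *)__get_free_page(GFP_KERNEL);
 *
 *			if (!page)
 *				return -ENOMEM;
 *			mc->objects[mc->nobjs++] = page;
 *		}
 *		return 0;
 *	}
 *
 *	void *example_pop(struct kvm_mmu_memory_cache *mc)
 *	{
 *		return mc->objects[--mc->nobjs];
 *	}
 */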

struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

/*
 * 0 is reserved as an invalid value.
 * Order should be kept in sync with the save/restore code.
 */
enum vcpu_sysreg {
	__INVALID_SYSREG__,
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMSWINC_EL0,	/* Software Increment Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strictly increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

/* 32bit mapping */
#define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
#define c0_CSSELR	(CSSELR_EL1 * 2)/* Cache Size Selection Register */
#define c1_SCTLR	(SCTLR_EL1 * 2)	/* System Control Register */
#define c1_ACTLR	(ACTLR_EL1 * 2)	/* Auxiliary Control Register */
#define c1_CPACR	(CPACR_EL1 * 2)	/* Coprocessor Access Control */
#define c2_TTBR0	(TTBR0_EL1 * 2)	/* Translation Table Base Register 0 */
#define c2_TTBR0_high	(c2_TTBR0 + 1)	/* TTBR0 top 32 bits */
#define c2_TTBR1	(TTBR1_EL1 * 2)	/* Translation Table Base Register 1 */
#define c2_TTBR1_high	(c2_TTBR1 + 1)	/* TTBR1 top 32 bits */
#define c2_TTBCR	(TCR_EL1 * 2)	/* Translation Table Base Control R. */
#define c3_DACR		(DACR32_EL2 * 2)/* Domain Access Control Register */
#define c5_DFSR		(ESR_EL1 * 2)	/* Data Fault Status Register */
#define c5_IFSR		(IFSR32_EL2 * 2)/* Instruction Fault Status Register */
#define c5_ADFSR	(AFSR0_EL1 * 2)	/* Auxiliary Data Fault Status R */
#define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
#define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
#define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
#define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
#define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
#define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
#define c13_CID		(CONTEXTIDR_EL1 * 2)	/* Context ID Register */
#define c13_TID_URW	(TPIDR_EL0 * 2)	/* Thread ID, User R/W */
#define c13_TID_URO	(TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
#define c13_TID_PRIV	(TPIDR_EL1 * 2)	/* Thread ID, Privileged */
#define c10_AMAIR0	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
#define c10_AMAIR1	(c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL	(CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */

#define cp14_DBGDSCRext	(MDSCR_EL1 * 2)
#define cp14_DBGBCR0	(DBGBCR0_EL1 * 2)
#define cp14_DBGBVR0	(DBGBVR0_EL1 * 2)
#define cp14_DBGBXVR0	(cp14_DBGBVR0 + 1)
#define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)

#define NR_COPRO_REGS	(NR_SYS_REGS * 2)

struct kvm_cpu_context {
	struct kvm_regs	gp_regs;
	union {
		u64 sys_regs[NR_SYS_REGS];
		u32 copro[NR_COPRO_REGS];
	};

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

typedef struct kvm_host_data kvm_host_data_t;

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug
	 * values we want to use to debug the guest. This is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	/* Pointer to host CPU context */
	struct kvm_cpu_context *host_cpu_context;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32 mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* IO related fields */
	struct kvm_decode mmio_decode;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state reset_state;

	/* True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
	bool sysregs_loaded_on_cpu;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl)))

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);	\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
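
/*
 * Sketch of how the two SVE macros above fit together (illustrative;
 * the real allocation happens when the vcpu's SVE configuration is
 * finalized): vcpu_sve_state_size() sizes the backing store for the
 * Z/P/FFR registers at the vcpu's maximum vector length, and
 * vcpu_sve_pffr() later points sve_{save,load}_state() at the FFR
 * image inside that buffer.
 *
 *	size_t size = vcpu_sve_state_size(vcpu);
 *
 *	if (!size)
 *		return -EINVAL;
 *	vcpu->arch.sve_state = kzalloc(size, GFP_KERNEL);
 *	if (!vcpu->arch.sve_state)
 *		return -ENOMEM;
 */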

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */

#define vcpu_has_sve(vcpu) (system_supports_sve() && \
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#define vcpu_has_ptrauth(vcpu)	((system_supports_address_auth() || \
				  system_supports_generic_auth()) && \
				 ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)

/*
 * Only use __vcpu_sys_reg if you know you want the memory backed version of a
 * register, and not the one most recently accessed by a running VCPU. For
 * example, for userspace access or for system registers that are never context
 * switched, but only emulated.
 */
#define __vcpu_sys_reg(v,r)	((v)->arch.ctxt.sys_regs[(r)])

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
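
/*
 * Example of the distinction drawn above (illustrative):
 *
 *	The memory-backed copy, e.g. when handling KVM_{GET,SET}_ONE_REG
 *	from userspace:
 *		val = __vcpu_sys_reg(vcpu, MPIDR_EL1);
 *
 *	The "live" value, which the helpers above may redirect to the
 *	hardware register while the vcpu's sysregs are loaded on the CPU:
 *		val = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
 *		vcpu_write_sys_reg(vcpu, val, SCTLR_EL1);
 */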

/*
 * CP14 and CP15 live in the same array, as they are backed by the
 * same system registers.
 */
#define vcpu_cp14(v,r)		((v)->arch.ctxt.copro[(r)])
#define vcpu_cp15(v,r)		((v)->arch.ctxt.copro[(r)])
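
/*
 * Illustrative example: each 64-bit sys_regs[] slot overlays two
 * consecutive 32-bit copro[] slots, which is what the "* 2" in the
 * c*_ and cp14_* defines above encodes. An AArch32 guest's TTBR0 can
 * thus be accessed as (sketch, little-endian layout assumed):
 *
 *	u32 lo = vcpu_cp15(vcpu, c2_TTBR0);
 *	u32 hi = vcpu_cp15(vcpu, c2_TTBR0_high);
 */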

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

u64 __kvm_call_hyp(void *hypfn, ...);

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
					     ##__VA_ARGS__);		\
		}							\
									\
		ret;							\
	})
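
/*
 * Usage sketch (the hyp function names are examples from the KVM/arm
 * code, not declared in this header): calls without an interesting
 * return value go through kvm_call_hyp(), calls with one through
 * kvm_call_hyp_ret().
 *
 *	kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
 *	ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
 */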

void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
		       int exception_index);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
					     int cpu)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	cpu_ctxt->sys_regs[MPIDR_EL1] = cpu_logical_map(cpu);
}

void __kvm_enable_ssbs(void);

static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
				       unsigned long hyp_stack_ptr,
				       unsigned long vector_ptr)
{
	/*
	 * Calculate the raw per-cpu offset without a translation from the
	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
	 * so that we can use adr_l to access per-cpu variables in EL2.
	 */
	u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
			 (u64)kvm_ksym_ref(kvm_host_data));

	/*
	 * Call initialization code, and switch to the full blown HYP code.
	 * If the cpucaps haven't been finalized yet, something has gone very
	 * wrong, and hyp will crash and burn when it uses any
	 * cpus_have_const_cap() wrapper.
	 */
	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);

	/*
	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
	 * at EL2.
	 */
	if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
	    arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
		kvm_call_hyp(__kvm_enable_ssbs);
	}
}

static inline bool kvm_arch_requires_vhe(void)
{
	/*
	 * The Arm architecture specifies that implementation of SVE
	 * requires VHE also to be implemented. The KVM code for arm64
	 * relies on this when SVE is present:
	 */
	if (system_supports_sve())
		return true;

	/* Some implementations have defects that confine them to VHE */
	if (cpus_have_cap(ARM64_WORKAROUND_1165522))
		return true;

	return false;
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

static inline void __cpu_init_stage2(void) {}

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

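/*
 * Illustrative example: on a non-VHE system the host kernel also runs
 * at EL1, so the PMU hardware alone cannot express "count this event
 * only while the guest runs". Such events are therefore deferred and
 * toggled by the switch code around guest entry/exit (sketch; the
 * attribute comes from <linux/perf_event.h>):
 *
 *	struct perf_event_attr attr = { .exclude_host = 1 };
 *
 *	if (kvm_pmu_counter_deferred(&attr))
 *		...enable the counter at guest entry, disable at exit...
 */
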
#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

static inline void kvm_arm_vhe_guest_enter(void)
{
	local_daif_mask();

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal interrupts of lower priority to the CPU, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 *
	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
	 * dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
	 */
	if (system_uses_irq_prio_masking())
		dsb(sy);
}

static inline void kvm_arm_vhe_guest_exit(void)
{
	/*
	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
	 * and the GIC PMR if the host is using IRQ priorities.
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * When we exit from the guest we change a number of CPU configuration
	 * parameters, such as traps. Make sure these changes take effect
	 * before running the host or additional guests.
	 */
	isb();
}
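
/*
 * Sketch of how the enter/exit pair brackets the VHE world switch
 * (illustrative; the actual call site is the vcpu run loop in the
 * KVM/arm code):
 *
 *	kvm_arm_vhe_guest_enter();
 *	ret = kvm_vcpu_run_vhe(vcpu);
 *	kvm_arm_vhe_guest_exit();
 */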

static inline bool kvm_arm_harden_branch_predictor(void)
{
	return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
}

#define KVM_SSBD_UNKNOWN		-1
#define KVM_SSBD_FORCE_DISABLE		0
#define KVM_SSBD_KERNEL			1
#define KVM_SSBD_FORCE_ENABLE		2
#define KVM_SSBD_MITIGATED		3

static inline int kvm_arm_have_ssbd(void)
{
	switch (arm64_get_ssbd_state()) {
	case ARM64_SSBD_FORCE_DISABLE:
		return KVM_SSBD_FORCE_DISABLE;
	case ARM64_SSBD_KERNEL:
		return KVM_SSBD_KERNEL;
	case ARM64_SSBD_FORCE_ENABLE:
		return KVM_SSBD_FORCE_ENABLE;
	case ARM64_SSBD_MITIGATED:
		return KVM_SSBD_MITIGATED;
	case ARM64_SSBD_UNKNOWN:
	default:
		return KVM_SSBD_UNKNOWN;
	}
}

void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);

void kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

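/*
 * Ordering sketch (illustrative): when SVE is exposed to a guest, its
 * vector-length configuration must be finalized from userspace (the
 * KVM_ARM_VCPU_FINALIZE ioctl with KVM_ARM_VCPU_SVE) before the vcpu
 * may run, and checks elsewhere in KVM take the form:
 *
 *	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
 *		return -EPERM;
 */
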
#endif /* __ARM64_KVM_HOST_H__ */