/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 512
#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;
};

struct kvm_arch {
	struct kvm_vmid vmid;

	/* stage2 entry level table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space.  User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
	bool return_nisv_io_abort_to_user;
};
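
/*
 * Illustrative sketch (not part of this header): userspace opts in to the
 * NISV abort reporting described above by enabling the capability on the
 * VM file descriptor.  Error handling omitted; vm_fd is assumed:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_ARM_NISV_TO_USER,
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */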

#define KVM_NR_MEM_OBJS     40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
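
/*
 * Illustrative sketch (helper names assumed, modelled on virt/kvm/arm/mmu.c):
 * the cache is topped up before taking the mmu_lock, so that table
 * allocations made with the lock held can never fail:
 *
 *	ret = mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache,
 *				     min, max);		// may sleep
 *	if (ret)
 *		return ret;
 *	spin_lock(&kvm->mmu_lock);
 *	new_table = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 *	spin_unlock(&kvm->mmu_lock);
 */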

struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

/*
 * 0 is reserved as an invalid value.
 * Order should be kept in sync with the save/restore code.
 */
enum vcpu_sysreg {
	__INVALID_SYSREG__,
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMSWINC_EL0,	/* Software Increment Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

/* 32bit mapping */
#define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
#define c0_CSSELR	(CSSELR_EL1 * 2)/* Cache Size Selection Register */
#define c1_SCTLR	(SCTLR_EL1 * 2)	/* System Control Register */
#define c1_ACTLR	(ACTLR_EL1 * 2)	/* Auxiliary Control Register */
#define c1_CPACR	(CPACR_EL1 * 2)	/* Coprocessor Access Control */
#define c2_TTBR0	(TTBR0_EL1 * 2)	/* Translation Table Base Register 0 */
#define c2_TTBR0_high	(c2_TTBR0 + 1)	/* TTBR0 top 32 bits */
#define c2_TTBR1	(TTBR1_EL1 * 2)	/* Translation Table Base Register 1 */
#define c2_TTBR1_high	(c2_TTBR1 + 1)	/* TTBR1 top 32 bits */
#define c2_TTBCR	(TCR_EL1 * 2)	/* Translation Table Base Control R. */
#define c3_DACR		(DACR32_EL2 * 2)/* Domain Access Control Register */
#define c5_DFSR		(ESR_EL1 * 2)	/* Data Fault Status Register */
#define c5_IFSR		(IFSR32_EL2 * 2)/* Instruction Fault Status Register */
#define c5_ADFSR	(AFSR0_EL1 * 2)	/* Auxiliary Data Fault Status R */
#define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
#define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
#define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
#define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
#define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
#define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
#define c13_CID		(CONTEXTIDR_EL1 * 2)	/* Context ID Register */
#define c13_TID_URW	(TPIDR_EL0 * 2)	/* Thread ID, User R/W */
#define c13_TID_URO	(TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
#define c13_TID_PRIV	(TPIDR_EL1 * 2)	/* Thread ID, Privileged */
#define c10_AMAIR0	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
#define c10_AMAIR1	(c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL	(CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */

#define cp14_DBGDSCRext	(MDSCR_EL1 * 2)
#define cp14_DBGBCR0	(DBGBCR0_EL1 * 2)
#define cp14_DBGBVR0	(DBGBVR0_EL1 * 2)
#define cp14_DBGBXVR0	(cp14_DBGBVR0 + 1)
#define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)

#define NR_COPRO_REGS	(NR_SYS_REGS * 2)

struct kvm_cpu_context {
	struct kvm_regs	gp_regs;
	union {
		u64 sys_regs[NR_SYS_REGS];
		u32 copro[NR_COPRO_REGS];
	};

	struct kvm_vcpu *__hyp_running_vcpu;
};
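
/*
 * Example of the 32bit mapping above (illustrative only): each AArch64
 * sysreg occupies one u64 slot of sys_regs[]; through the copro[] union
 * the same storage is seen as pairs of u32, hence the "* 2" indices.
 * A 32-bit guest's 64-bit TTBR0 is then accessed as two halves:
 *
 *	u32 lo = vcpu_cp15(vcpu, c2_TTBR0);		// low word
 *	u32 hi = vcpu_cp15(vcpu, c2_TTBR0_high);	// high word
 *
 * (vcpu_cp15() is defined further down; the half ordering shown assumes
 * the little-endian layout of the union.)
 */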

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

typedef struct kvm_host_data kvm_host_data_t;

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches.  vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them.  host_debug_state are
	 * the host registers which are saved and restored during world
	 * switches.  external_debug_state contains the debug values we want
	 * to use to debug the guest; this is set via the KVM_SET_GUEST_DEBUG
	 * ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	/* Pointer to host CPU context */
	struct kvm_cpu_context *host_cpu_context;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* IO related fields */
	struct kvm_decode mmio_decode;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/*
	 * True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs.
	 */
	bool sysregs_loaded_on_cpu;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
				      sve_ffr_offset((vcpu)->arch.sve_max_vl)))

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);	\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
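
/*
 * Worked example (illustrative, relying on the SVE_SIG_* layout macros in
 * <uapi/asm/sigcontext.h>): a vcpu with sve_max_vl = 32 bytes (256-bit
 * vectors) has __vcpu_vq = sve_vq_from_vl(32) = 2, so the buffer holds
 * 32 Z registers of vq * 16 = 32 bytes each, plus 16 P registers and FFR
 * of vq * 2 = 4 bytes each:
 *
 *	32 * 32 + 17 * 4 = 1092 bytes
 *
 * vcpu_sve_pffr() then points sve_ffr_offset(32) bytes into that buffer,
 * where the FFR image lives for sve_{save,load}_state().
 */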

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */

#define vcpu_has_sve(vcpu) (system_supports_sve() && \
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#define vcpu_has_ptrauth(vcpu)	((system_supports_address_auth() || \
				  system_supports_generic_auth()) && \
				 ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)

/*
 * Only use __vcpu_sys_reg if you know you want the memory backed version of a
 * register, and not the one most recently accessed by a running VCPU.  For
 * example, for userspace access or for system registers that are never context
 * switched, but only emulated.
 */
#define __vcpu_sys_reg(v,r)	((v)->arch.ctxt.sys_regs[(r)])

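/*
 * Illustrative contrast with the accessors declared below: while a vcpu is
 * loaded, a deferrable register may live on the hardware rather than in
 * memory, and only vcpu_read_sys_reg()/vcpu_write_sys_reg() see that copy:
 *
 *	val = __vcpu_sys_reg(vcpu, SCTLR_EL1);	  // always the memory copy
 *	val = vcpu_read_sys_reg(vcpu, SCTLR_EL1); // hardware copy if loaded
 *	vcpu_write_sys_reg(vcpu, val, SCTLR_EL1);
 */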
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

/*
 * CP14 and CP15 live in the same array, as they are backed by the
 * same system registers.
 */
#define vcpu_cp14(v,r)		((v)->arch.ctxt.copro[(r)])
#define vcpu_cp15(v,r)		((v)->arch.ctxt.copro[(r)])

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

u64 __kvm_call_hyp(void *hypfn, ...);

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
					     ##__VA_ARGS__);		\
		}							\
									\
		ret;							\
	})
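
/*
 * Usage sketch (illustrative; the hyp functions named here exist elsewhere
 * in KVM but are shown only as examples):
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);	   // no return value
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu); // returns a value
 *
 * On VHE this amounts to a plain function call plus an isb(); on !VHE the
 * symbol is translated with kvm_ksym_ref() and invoked at EL2 via HVC.
 */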

void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
		       int exception_index);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
}

void __kvm_enable_ssbs(void);

static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
				       unsigned long hyp_stack_ptr,
				       unsigned long vector_ptr)
{
	/*
	 * Calculate the raw per-cpu offset without a translation from the
	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
	 * so that we can use adr_l to access per-cpu variables in EL2.
	 */
	u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
			 (u64)kvm_ksym_ref(kvm_host_data));

	/*
	 * Call initialization code, and switch to the full-blown HYP code.
	 * If the cpucaps haven't been finalized yet, something has gone very
	 * wrong, and hyp will crash and burn when it uses any
	 * cpus_have_const_cap() wrapper.
	 */
	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);

	/*
	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
	 * at EL2.
	 */
	if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
	    arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
		kvm_call_hyp(__kvm_enable_ssbs);
	}
}

static inline bool kvm_arch_requires_vhe(void)
{
	/*
	 * The Arm architecture specifies that implementation of SVE
	 * requires VHE also to be implemented.  The KVM code for arm64
	 * relies on this when SVE is present:
	 */
	if (system_supports_sve())
		return true;

	/* Some implementations have defects that confine them to VHE */
	if (cpus_have_cap(ARM64_WORKAROUND_1165522))
		return true;

	return false;
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

static inline void __cpu_init_stage2(void) {}

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}
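
/*
 * Illustrative example: a perf event created with attr.exclude_host set
 * must only count while the guest runs.  Without VHE the host cannot flip
 * the counter state itself at the right moment, so enabling the counter
 * is deferred to the EL2 world-switch code:
 *
 *	struct perf_event_attr attr = { .exclude_host = 1 };
 *	// kvm_pmu_counter_deferred(&attr) is true on non-VHE systems
 */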

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

static inline void kvm_arm_vhe_guest_enter(void)
{
	local_daif_mask();

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU about interrupts of lower priority, and
	 * the only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 *
	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET; we just need a
	 * dsb to ensure that the redistributor forwards EL2 IRQs to the CPU.
	 */
	if (system_uses_irq_prio_masking())
		dsb(sy);
}

static inline void kvm_arm_vhe_guest_exit(void)
{
	/*
	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
	 * and the GIC PMR if the host is using IRQ priorities.
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * When we exit from the guest we change a number of CPU configuration
	 * parameters, such as traps.  Make sure these changes take effect
	 * before running the host or additional guests.
	 */
	isb();
}

#define KVM_BP_HARDEN_UNKNOWN		-1
#define KVM_BP_HARDEN_WA_NEEDED		0
#define KVM_BP_HARDEN_NOT_REQUIRED	1

static inline int kvm_arm_harden_branch_predictor(void)
{
	switch (get_spectre_v2_workaround_state()) {
	case ARM64_BP_HARDEN_WA_NEEDED:
		return KVM_BP_HARDEN_WA_NEEDED;
	case ARM64_BP_HARDEN_NOT_REQUIRED:
		return KVM_BP_HARDEN_NOT_REQUIRED;
	case ARM64_BP_HARDEN_UNKNOWN:
	default:
		return KVM_BP_HARDEN_UNKNOWN;
	}
}

#define KVM_SSBD_UNKNOWN		-1
#define KVM_SSBD_FORCE_DISABLE		0
#define KVM_SSBD_KERNEL			1
#define KVM_SSBD_FORCE_ENABLE		2
#define KVM_SSBD_MITIGATED		3

static inline int kvm_arm_have_ssbd(void)
{
	switch (arm64_get_ssbd_state()) {
	case ARM64_SSBD_FORCE_DISABLE:
		return KVM_SSBD_FORCE_DISABLE;
	case ARM64_SSBD_KERNEL:
		return KVM_SSBD_KERNEL;
	case ARM64_SSBD_FORCE_ENABLE:
		return KVM_SSBD_FORCE_ENABLE;
	case ARM64_SSBD_MITIGATED:
		return KVM_SSBD_MITIGATED;
	case ARM64_SSBD_UNKNOWN:
	default:
		return KVM_SSBD_UNKNOWN;
	}
}

void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);

void kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#endif /* __ARM64_KVM_HOST_H__ */