/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

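/*
 * AArch32 guest support: these helpers give the 64-bit host a view of
 * the guest's banked 32-bit registers and condition-code handling.
 * They are implemented out of line in KVM's AArch32 support code.
 */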
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

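/*
 * Compute the baseline HCR_EL2 configuration for a vCPU: start from
 * HCR_GUEST_FLAGS, then enable the traps and routing the host needs
 * (E2H when running VHE, TEA/TERR when RAS is present, FWB when
 * stage-2 forced write-back is supported, TID3/TID2 for feature and
 * cache register virtualisation). HCR_RW is cleared for 32-bit guests.
 */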
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

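/*
 * HCR_EL2.TWE makes guest WFE instructions trap to EL2. The host
 * toggles it at run time, typically trapping WFE (so the physical CPU
 * can be yielded) only while other vCPUs are waiting to run.
 */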
static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
}

static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
}

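/*
 * Pointer authentication is handled lazily: vcpu_ptrauth_setup_lazy()
 * clears HCR_EL2.{API,APK}, so the guest's first use of a ptrauth
 * instruction or key register traps to EL2. The trap handler can then
 * context-switch the keys and call vcpu_ptrauth_enable() to stop
 * further traps.
 */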
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
}

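/*
 * VSESR_EL2 supplies the syndrome value reported to the guest when a
 * virtual SError is taken (RAS extension); it is programmed before a
 * virtual SError is injected via HCR_EL2.VSE.
 */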
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

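/*
 * With VHE, the guest's EL1 system registers can remain loaded on the
 * CPU while the host runs (sysregs_loaded_on_cpu); the accessors below
 * must then read/write the live hardware register rather than the
 * in-memory copy.
 */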
static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(elr);
	else
		return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, elr);
	else
		*__vcpu_elr_el1(vcpu) = v;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, they may give the wrong result
 * on AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(spsr);
	else
		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, spsr);
	else
		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

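/*
 * "HSR" is the AArch32 name for the syndrome register; on arm64 this
 * is the ESR_EL2 value snapshotted at exit time.
 */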
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

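/*
 * HPFAR_EL2.FIPA holds bits [47:12] of the faulting IPA, stored from
 * bit 4 upwards; masking and shifting left by 8 therefore reconstructs
 * the page-aligned IPA.
 */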
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

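/*
 * The helpers below decode the ISS field of a data abort taken from
 * the guest. When ESR_EL2.ISV is set, a trapped load/store can be
 * emulated purely from the syndrome; an illustrative sketch:
 *
 *	if (kvm_vcpu_dabt_isvalid(vcpu)) {
 *		int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *		int len = kvm_vcpu_dabt_get_as(vcpu);	(1, 2, 4 or 8 bytes)
 *		unsigned long data;
 *
 *		if (kvm_vcpu_dabt_iswrite(vcpu))
 *			data = vcpu_get_reg(vcpu, rt);
 *	}
 */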
static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

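/*
 * For a trapped MSR/MRS, the ISS encodes which general purpose
 * register (Rt) was the source or destination of the access.
 */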
static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

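/*
 * Per-vCPU state for ARCH_WORKAROUND_2 (Spectre-v4 / Speculative Store
 * Bypass): the flag records whether the guest currently wants the
 * mitigation enabled, so the matching state can be applied around
 * guest entry and exit.
 */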
static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
}

static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
						      bool flag)
{
	if (flag)
		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
	else
		vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
}

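/*
 * Guest data endianness: for AArch32 guests this is CPSR.E; for
 * AArch64 guests it is SCTLR_EL1.EE (bit 25), hence the (1 << 25)
 * below.
 */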
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= (1 << 25);
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

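/*
 * MMIO data is mediated in host byte order. For example, a 2-byte
 * store of 0x1234 by a big-endian guest is converted to 0x3412 on a
 * little-endian host, preserving the byte image the guest intended to
 * write. The two helpers below perform that fixup in each direction,
 * truncating to the access size.
 */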
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}

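/*
 * Clearing DBG_SPSR_SS after emulating an instruction lets a pending
 * guest single-step fire on the next entry, as if the emulated
 * instruction had actually been executed and stepped.
 */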
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;

	/* advance the singlestep state machine */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);
	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	write_sysreg_el2(*vcpu_pc(vcpu), elr);
}

#endif /* __ARM64_KVM_EMULATE_H__ */
Marc Zyngier83a49792012-12-10 13:27:52 +0000477#endif /* __ARM64_KVM_EMULATE_H__ */