/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(elr);
	else
		return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, elr);
	else
		*__vcpu_elr_el1(vcpu) = v;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

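/*
 * All A64 instructions are 4 bytes, so skipping a trapped instruction in a
 * 64-bit guest is a fixed PC increment; the AArch32 helper also has to cope
 * with 16-bit Thumb encodings, which is what is_wide_instr conveys.
 */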
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
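/*
 * For example, the MMIO fault path takes the transfer register straight from
 * the data abort syndrome before going through these accessors, roughly:
 *
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *	vcpu_set_reg(vcpu, rt, vcpu_data_host_to_guest(vcpu, data, len));
 */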
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(spsr);
	else
		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, spsr);
	else
		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
		return mode > COMPAT_PSR_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

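/*
 * HPFAR_EL2 holds bits [47:12] of the faulting intermediate physical address
 * in bits [39:4], so masking off the bottom nibble and shifting left by 8
 * recovers the page-aligned IPA of the stage-2 fault.
 */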
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= (1 << 25);
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}

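/*
 * Worked example: a big-endian vcpu storing the halfword 0x1234 presents
 * 0x3412 to a little-endian host through vcpu_data_guest_to_host(), and a
 * device value of 0x3412 comes back to that vcpu as 0x1234 through
 * vcpu_data_host_to_guest() on the load path.
 */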
#endif /* __ARM64_KVM_EMULATE_H__ */