// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/emulate.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/ptrace.h>

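/*
 * The six AArch32 modes with distinct register banks: USR (also
 * covering SYS), FIQ, IRQ, SVC, ABT and UND.
 */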
#define VCPU_NR_MODES 6
#define REG_OFFSET(_reg) \
	(offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))

#define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))

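/*
 * Per-mode table of offsets (in unsigned longs) into struct
 * user_pt_regs, mapping AArch32 register numbers r0-r15 to their
 * backing storage. Registers banked for a mode use the dedicated
 * compat_*_<mode> slots; everything else aliases the USR copy.
 */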
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
	/* USR Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
		REG_OFFSET(pc)
	},

	/* FIQ Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(compat_r8_fiq),  /* r8 */
		REG_OFFSET(compat_r9_fiq),  /* r9 */
		REG_OFFSET(compat_r10_fiq), /* r10 */
		REG_OFFSET(compat_r11_fiq), /* r11 */
		REG_OFFSET(compat_r12_fiq), /* r12 */
		REG_OFFSET(compat_sp_fiq),  /* r13 */
		REG_OFFSET(compat_lr_fiq),  /* r14 */
		REG_OFFSET(pc)
	},

	/* IRQ Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_irq), /* r13 */
		REG_OFFSET(compat_lr_irq), /* r14 */
		REG_OFFSET(pc)
	},

	/* SVC Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_svc), /* r13 */
		REG_OFFSET(compat_lr_svc), /* r14 */
		REG_OFFSET(pc)
	},

	/* ABT Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_abt), /* r13 */
		REG_OFFSET(compat_lr_abt), /* r14 */
		REG_OFFSET(pc)
	},

	/* UND Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_und), /* r13 */
		REG_OFFSET(compat_lr_und), /* r14 */
		REG_OFFSET(pc)
	},
};

/*
 * Return a pointer to the storage backing the given register number
 * in the current mode of the virtual CPU.
 */
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
{
	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.regs;
	unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;

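	/*
	 * Collapse the mode field into an index into vcpu_reg_offsets:
	 * USR..SVC are contiguous once PSR_MODE32_BIT is stripped.
	 */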
	switch (mode) {
	case PSR_AA32_MODE_USR ... PSR_AA32_MODE_SVC:
		mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
		break;

	case PSR_AA32_MODE_ABT:
		mode = 4;
		break;

	case PSR_AA32_MODE_UND:
		mode = 5;
		break;

	case PSR_AA32_MODE_SYS:
		mode = 0;	/* SYS maps to USR */
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}

/*
 * Return the SPSR index (KVM_SPSR_*) for the current mode of the
 * virtual CPU.
 */
static int vcpu_spsr32_mode(const struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
	switch (mode) {
	case PSR_AA32_MODE_SVC: return KVM_SPSR_SVC;
	case PSR_AA32_MODE_ABT: return KVM_SPSR_ABT;
	case PSR_AA32_MODE_UND: return KVM_SPSR_UND;
	case PSR_AA32_MODE_IRQ: return KVM_SPSR_IRQ;
	case PSR_AA32_MODE_FIQ: return KVM_SPSR_FIQ;
	default: BUG();
	}
}

unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu)
{
	int spsr_idx = vcpu_spsr32_mode(vcpu);

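	/*
	 * If the guest's sysregs are not resident on the CPU, return
	 * the in-memory copy from the vcpu context.
	 */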
	if (!vcpu->arch.sysregs_loaded_on_cpu) {
		switch (spsr_idx) {
		case KVM_SPSR_SVC:
			return __vcpu_sys_reg(vcpu, SPSR_EL1);
		case KVM_SPSR_ABT:
			return vcpu->arch.ctxt.spsr_abt;
		case KVM_SPSR_UND:
			return vcpu->arch.ctxt.spsr_und;
		case KVM_SPSR_IRQ:
			return vcpu->arch.ctxt.spsr_irq;
		case KVM_SPSR_FIQ:
			return vcpu->arch.ctxt.spsr_fiq;
		}
	}

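	/* The sysregs are loaded on the CPU: read the SPSR directly. */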
	switch (spsr_idx) {
	case KVM_SPSR_SVC:
		return read_sysreg_el1(SYS_SPSR);
	case KVM_SPSR_ABT:
		return read_sysreg(spsr_abt);
	case KVM_SPSR_UND:
		return read_sysreg(spsr_und);
	case KVM_SPSR_IRQ:
		return read_sysreg(spsr_irq);
	case KVM_SPSR_FIQ:
		return read_sysreg(spsr_fiq);
	default:
		BUG();
	}
}

void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
{
	int spsr_idx = vcpu_spsr32_mode(vcpu);

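	/*
	 * If the guest's sysregs are not resident on the CPU, update
	 * the in-memory copy in the vcpu context.
	 */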
	if (!vcpu->arch.sysregs_loaded_on_cpu) {
		switch (spsr_idx) {
		case KVM_SPSR_SVC:
			__vcpu_sys_reg(vcpu, SPSR_EL1) = v;
			break;
		case KVM_SPSR_ABT:
			vcpu->arch.ctxt.spsr_abt = v;
			break;
		case KVM_SPSR_UND:
			vcpu->arch.ctxt.spsr_und = v;
			break;
		case KVM_SPSR_IRQ:
			vcpu->arch.ctxt.spsr_irq = v;
			break;
		case KVM_SPSR_FIQ:
			vcpu->arch.ctxt.spsr_fiq = v;
			break;
		}

		return;
	}

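	/* The sysregs are loaded on the CPU: write the SPSR directly. */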
	switch (spsr_idx) {
	case KVM_SPSR_SVC:
		write_sysreg_el1(v, SYS_SPSR);
		break;
	case KVM_SPSR_ABT:
		write_sysreg(v, spsr_abt);
		break;
	case KVM_SPSR_UND:
		write_sysreg(v, spsr_und);
		break;
	case KVM_SPSR_IRQ:
		write_sysreg(v, spsr_irq);
		break;
	case KVM_SPSR_FIQ:
		write_sysreg(v, spsr_fiq);
		break;
	}
}