/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)

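/*
 * Note: CPU_GP_REGS, CPU_USER_PT_REGS, CPU_SPSR and CPU_SYSREGS come from
 * asm-offsets.c. The intent (a sketch of the layout, not the authoritative
 * definition) is that CPU_XREG_OFFSET(n) is the byte offset of
 * gp_regs.regs.regs[n] within struct kvm_cpu_context, and
 * CPU_SYSREG_OFFSET(r) the offset of sys_regs[r]; each slot is 8 bytes,
 * hence the 8*x scaling.
 */
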
	.text
	.pushsection	.hyp.text, "ax"
	.align	PAGE_SHIFT

.macro save_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x19, x20, [x3]
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x19, sp_el0
	mrs	x20, elr_el2		// EL1 PC
	mrs	x21, spsr_el2		// EL1 pstate

	stp	x19, x20, [x3, #96]
	str	x21, [x3, #112]

	mrs	x22, sp_el1
	mrs	x23, elr_el1
	mrs	x24, spsr_el1

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm

.macro restore_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	msr	sp_el1, x22
	msr	elr_el1, x23
	msr	spsr_el1, x24

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
	ldp	x19, x20, [x3]
	ldr	x21, [x3, #16]

	msr	sp_el0, x19
	msr	elr_el2, x20		// EL1 PC
	msr	spsr_el2, x21		// EL1 pstate

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x19, x20, [x3]
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.endm

.macro save_host_regs
	save_common_regs
.endm

.macro restore_host_regs
	restore_common_regs
.endm

.macro save_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm

.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	str	x18, [x3, #112]

	pop	x6, x7			// x2, x3
	pop	x4, x5			// x0, x1

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	save_common_regs
.endm

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack
	push	x6, x7

	// x4-x18
	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]
	ldr	x18, [x3, #144]

	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs

	// Last bits of the 64bit state
	pop	x2, x3
	pop	x0, x1

	// Do not touch any register after this!
.endm

/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
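/*
 * For reference only (an illustration, not the canonical definition -- see
 * asm/kvm_asm.h for that): the sysreg context indices assumed below start
 * with MPIDR_EL1, CSSELR_EL1, SCTLR_EL1, ACTLR_EL1, CPACR_EL1, TTBR0_EL1,
 * TTBR1_EL1, TCR_EL1, ... and the EL1 block ends with PAR_EL1 and
 * MDSCR_EL1, matching the mrs/stp sequence in save_sysregs and the msr
 * sequence in restore_sysregs.
 */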
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x4, vmpidr_el2
	mrs	x5, csselr_el1
	mrs	x6, sctlr_el1
	mrs	x7, actlr_el1
	mrs	x8, cpacr_el1
	mrs	x9, ttbr0_el1
	mrs	x10, ttbr1_el1
	mrs	x11, tcr_el1
	mrs	x12, esr_el1
	mrs	x13, afsr0_el1
	mrs	x14, afsr1_el1
	mrs	x15, far_el1
	mrs	x16, mair_el1
	mrs	x17, vbar_el1
	mrs	x18, contextidr_el1
	mrs	x19, tpidr_el0
	mrs	x20, tpidrro_el0
	mrs	x21, tpidr_el1
	mrs	x22, amair_el1
	mrs	x23, cntkctl_el1
	mrs	x24, par_el1
	mrs	x25, mdscr_el1

	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
	stp	x24, x25, [x3, #160]
.endm

.macro save_debug
	// x2: base address for cpu context
	// x3: tmp register

	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip
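	// Note on the computed branches below: ID_AA64DFR0_EL1.BRPs/WRPs
	// encode the number of implemented breakpoints/watchpoints minus
	// one, so w24/w25 now hold how many of the 16 architected registers
	// are *not* implemented. Each mrs/str in the lists that follow is
	// 4 bytes, so "adr xN, 1f; add xN, xN, count, lsl #2; br xN" skips
	// exactly the accesses to unimplemented registers.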

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	mrs	x20, dbgbcr15_el1
	mrs	x19, dbgbcr14_el1
	mrs	x18, dbgbcr13_el1
	mrs	x17, dbgbcr12_el1
	mrs	x16, dbgbcr11_el1
	mrs	x15, dbgbcr10_el1
	mrs	x14, dbgbcr9_el1
	mrs	x13, dbgbcr8_el1
	mrs	x12, dbgbcr7_el1
	mrs	x11, dbgbcr6_el1
	mrs	x10, dbgbcr5_el1
	mrs	x9, dbgbcr4_el1
	mrs	x8, dbgbcr3_el1
	mrs	x7, dbgbcr2_el1
	mrs	x6, dbgbcr1_el1
	mrs	x5, dbgbcr0_el1

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26

1:
	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	mrs	x20, dbgbvr15_el1
	mrs	x19, dbgbvr14_el1
	mrs	x18, dbgbvr13_el1
	mrs	x17, dbgbvr12_el1
	mrs	x16, dbgbvr11_el1
	mrs	x15, dbgbvr10_el1
	mrs	x14, dbgbvr9_el1
	mrs	x13, dbgbvr8_el1
	mrs	x12, dbgbvr7_el1
	mrs	x11, dbgbvr6_el1
	mrs	x10, dbgbvr5_el1
	mrs	x9, dbgbvr4_el1
	mrs	x8, dbgbvr3_el1
	mrs	x7, dbgbvr2_el1
	mrs	x6, dbgbvr1_el1
	mrs	x5, dbgbvr0_el1

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26

1:
	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	mrs	x20, dbgwcr15_el1
	mrs	x19, dbgwcr14_el1
	mrs	x18, dbgwcr13_el1
	mrs	x17, dbgwcr12_el1
	mrs	x16, dbgwcr11_el1
	mrs	x15, dbgwcr10_el1
	mrs	x14, dbgwcr9_el1
	mrs	x13, dbgwcr8_el1
	mrs	x12, dbgwcr7_el1
	mrs	x11, dbgwcr6_el1
	mrs	x10, dbgwcr5_el1
	mrs	x9, dbgwcr4_el1
	mrs	x8, dbgwcr3_el1
	mrs	x7, dbgwcr2_el1
	mrs	x6, dbgwcr1_el1
	mrs	x5, dbgwcr0_el1

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26

1:
	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	mrs	x20, dbgwvr15_el1
	mrs	x19, dbgwvr14_el1
	mrs	x18, dbgwvr13_el1
	mrs	x17, dbgwvr12_el1
	mrs	x16, dbgwvr11_el1
	mrs	x15, dbgwvr10_el1
	mrs	x14, dbgwvr9_el1
	mrs	x13, dbgwvr8_el1
	mrs	x12, dbgwvr7_el1
	mrs	x11, dbgwvr6_el1
	mrs	x10, dbgwvr5_el1
	mrs	x9, dbgwvr4_el1
	mrs	x8, dbgwvr3_el1
	mrs	x7, dbgwvr2_el1
	mrs	x6, dbgwvr1_el1
	mrs	x5, dbgwvr0_el1

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26

1:
	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	mrs	x21, mdccint_el1
	str	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
.endm

.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]
	ldp	x24, x25, [x3, #160]

	msr	vmpidr_el2, x4
	msr	csselr_el1, x5
	msr	sctlr_el1, x6
	msr	actlr_el1, x7
	msr	cpacr_el1, x8
	msr	ttbr0_el1, x9
	msr	ttbr1_el1, x10
	msr	tcr_el1, x11
	msr	esr_el1, x12
	msr	afsr0_el1, x13
	msr	afsr1_el1, x14
	msr	far_el1, x15
	msr	mair_el1, x16
	msr	vbar_el1, x17
	msr	contextidr_el1, x18
	msr	tpidr_el0, x19
	msr	tpidrro_el0, x20
	msr	tpidr_el1, x21
	msr	amair_el1, x22
	msr	cntkctl_el1, x23
	msr	par_el1, x24
	msr	mdscr_el1, x25
.endm

.macro restore_debug
	// x2: base address for cpu context
	// x3: tmp register

	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip
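	// Same skip computation as in save_debug above: w24/w25 count the
	// unimplemented breakpoint/watchpoint registers to jump over.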

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	msr	dbgbcr15_el1, x20
	msr	dbgbcr14_el1, x19
	msr	dbgbcr13_el1, x18
	msr	dbgbcr12_el1, x17
	msr	dbgbcr11_el1, x16
	msr	dbgbcr10_el1, x15
	msr	dbgbcr9_el1, x14
	msr	dbgbcr8_el1, x13
	msr	dbgbcr7_el1, x12
	msr	dbgbcr6_el1, x11
	msr	dbgbcr5_el1, x10
	msr	dbgbcr4_el1, x9
	msr	dbgbcr3_el1, x8
	msr	dbgbcr2_el1, x7
	msr	dbgbcr1_el1, x6
	msr	dbgbcr0_el1, x5

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	adr	x26, 1f
	add	x26, x26, x24, lsl #2
	br	x26
1:
	msr	dbgbvr15_el1, x20
	msr	dbgbvr14_el1, x19
	msr	dbgbvr13_el1, x18
	msr	dbgbvr12_el1, x17
	msr	dbgbvr11_el1, x16
	msr	dbgbvr10_el1, x15
	msr	dbgbvr9_el1, x14
	msr	dbgbvr8_el1, x13
	msr	dbgbvr7_el1, x12
	msr	dbgbvr6_el1, x11
	msr	dbgbvr5_el1, x10
	msr	dbgbvr4_el1, x9
	msr	dbgbvr3_el1, x8
	msr	dbgbvr2_el1, x7
	msr	dbgbvr1_el1, x6
	msr	dbgbvr0_el1, x5

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	msr	dbgwcr15_el1, x20
	msr	dbgwcr14_el1, x19
	msr	dbgwcr13_el1, x18
	msr	dbgwcr12_el1, x17
	msr	dbgwcr11_el1, x16
	msr	dbgwcr10_el1, x15
	msr	dbgwcr9_el1, x14
	msr	dbgwcr8_el1, x13
	msr	dbgwcr7_el1, x12
	msr	dbgwcr6_el1, x11
	msr	dbgwcr5_el1, x10
	msr	dbgwcr4_el1, x9
	msr	dbgwcr3_el1, x8
	msr	dbgwcr2_el1, x7
	msr	dbgwcr1_el1, x6
	msr	dbgwcr0_el1, x5

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	adr	x26, 1f
	add	x26, x26, x25, lsl #2
	br	x26
1:
	msr	dbgwvr15_el1, x20
	msr	dbgwvr14_el1, x19
	msr	dbgwvr13_el1, x18
	msr	dbgwvr12_el1, x17
	msr	dbgwvr11_el1, x16
	msr	dbgwvr10_el1, x15
	msr	dbgwvr9_el1, x14
	msr	dbgwvr8_el1, x13
	msr	dbgwvr7_el1, x12
	msr	dbgwvr6_el1, x11
	msr	dbgwvr5_el1, x10
	msr	dbgwvr4_el1, x9
	msr	dbgwvr3_el1, x8
	msr	dbgwvr2_el1, x7
	msr	dbgwvr1_el1, x6
	msr	dbgwvr0_el1, x5

	ldr	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
	msr	mdccint_el1, x21
.endm

.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target
.endm

.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target
.endm

.macro skip_debug_state tmp, target
	ldr	\tmp, [x0, #VCPU_DEBUG_FLAGS]
	tbz	\tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
.endm

.macro compute_debug_state target
	// Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
	// is set, we do a full save/restore cycle and disable trapping.
	add	x25, x0, #VCPU_CONTEXT

	// Check the state of MDSCR_EL1
	ldr	x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
	and	x26, x25, #DBG_MDSCR_KDE
	and	x25, x25, #DBG_MDSCR_MDE
	adds	xzr, x25, x26
	b.eq	9998f		// Nothing to see there

	// If any interesting bits were set, we must set the flag
	mov	x26, #KVM_ARM64_DEBUG_DIRTY
	str	x26, [x0, #VCPU_DEBUG_FLAGS]
	b	9999f		// Don't skip restore

9998:
	// Otherwise load the flags from memory in case we recently
	// trapped
	skip_debug_state x25, \target
9999:
.endm

.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	mrs	x4, spsr_abt
	mrs	x5, spsr_und
	mrs	x6, spsr_irq
	mrs	x7, spsr_fiq
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	mrs	x4, dacr32_el2
	mrs	x5, ifsr32_el2
	mrs	x6, fpexc32_el2
	stp	x4, x5, [x3]
	str	x6, [x3, #16]

	skip_debug_state x8, 2f
	mrs	x7, dbgvcr32_el2
	str	x7, [x3, #24]
2:
	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	mrs	x4, teecr32_el1
	mrs	x5, teehbr32_el1
	stp	x4, x5, [x3]
1:
.endm

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	spsr_abt, x4
	msr	spsr_und, x5
	msr	spsr_irq, x6
	msr	spsr_fiq, x7

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	ldp	x4, x5, [x3]
	ldr	x6, [x3, #16]
	msr	dacr32_el2, x4
	msr	ifsr32_el2, x5
	msr	fpexc32_el2, x6

	skip_debug_state x8, 2f
	ldr	x7, [x3, #24]
	msr	dbgvcr32_el2, x7
2:
	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	ldp	x4, x5, [x3]
	msr	teecr32_el1, x4
	msr	teehbr32_el1, x5
1:
.endm


.macro activate_traps
	ldr	x2, [x0, #VCPU_HCR_EL2]
	msr	hcr_el2, x2
	mov	x2, #CPTR_EL2_TTA
	msr	cptr_el2, x2

	mov	x2, #(1 << 15)	// Trap CP15 Cr=15
	msr	hstr_el2, x2

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
	orr	x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)

	// Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
	// if not dirty.
	ldr	x3, [x0, #VCPU_DEBUG_FLAGS]
	tbnz	x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
	orr	x2, x2, #MDCR_EL2_TDA
1:
	msr	mdcr_el2, x2
.endm

.macro deactivate_traps
	mov	x2, #HCR_RW
	msr	hcr_el2, x2
	msr	cptr_el2, xzr
	msr	hstr_el2, xzr

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	msr	mdcr_el2, x2
.endm

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	kern_hyp_va	x1
	ldr	x2, [x1, #KVM_VTTBR]
	msr	vttbr_el2, x2
.endm

.macro deactivate_vm
	msr	vttbr_el2, xzr
.endm


/*
 * Call into the vgic backend for state saving
 */
.macro save_vgic_state
	adr	x24, __vgic_sr_vectors
	ldr	x24, [x24, VGIC_SAVE_FN]
	kern_hyp_va	x24
	blr	x24
	mrs	x24, hcr_el2
	mov	x25, #HCR_INT_OVERRIDE
	neg	x25, x25
	and	x24, x24, x25
	msr	hcr_el2, x24
.endm

/*
 * Call into the vgic backend for state restoring
 */
.macro restore_vgic_state
	mrs	x24, hcr_el2
	ldr	x25, [x0, #VCPU_IRQ_LINES]
	orr	x24, x24, #HCR_INT_OVERRIDE
	orr	x24, x24, x25
	msr	hcr_el2, x24
	adr	x24, __vgic_sr_vectors
	ldr	x24, [x24, #VGIC_RESTORE_FN]
	kern_hyp_va	x24
	blr	x24
.endm

.macro save_timer_state
	// x0: vcpu pointer
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	mrs	x3, cntv_ctl_el0
	and	x3, x3, #3
	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
	bic	x3, x3, #1		// Clear Enable
	msr	cntv_ctl_el0, x3

	isb

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

1:
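	// CNTHCTL_EL2: bit 0 (EL1PCTEN) gates physical counter access from
	// EL1/EL0 and bit 1 (EL1PCEN) gates physical timer access, so the
	// "#3" below grants the host both, while restore_timer_state keeps
	// only the counter accessible to the guest.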
	// Allow physical timer/counter access for the host
	mrs	x2, cnthctl_el2
	orr	x2, x2, #3
	msr	cnthctl_el2, x2

	// Clear cntvoff for the host
	msr	cntvoff_el2, xzr
.endm

.macro restore_timer_state
	// x0: vcpu pointer
	// Disallow physical timer access for the guest
	// Physical counter access is allowed
	mrs	x2, cnthctl_el2
	orr	x2, x2, #1
	bic	x2, x2, #2
	msr	cnthctl_el2, x2

	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
	msr	cntvoff_el2, x3
	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2
	isb

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
	and	x2, x2, #3
	msr	cntv_ctl_el0, x2
1:
.endm

__save_sysregs:
	save_sysregs
	ret

__restore_sysregs:
	restore_sysregs
	ret

__save_debug:
	save_debug
	ret

__restore_debug:
	restore_debug
	ret

__save_fpsimd:
	save_fpsimd
	ret

__restore_fpsimd:
	restore_fpsimd
	ret

/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	save_host_regs
	bl __save_fpsimd
	bl __save_sysregs

	compute_debug_state 1f
	bl	__save_debug
1:
	activate_traps
	activate_vm

	restore_vgic_state
	restore_timer_state

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	bl __restore_sysregs
	bl __restore_fpsimd

	skip_debug_state x3, 1f
	bl	__restore_debug
1:
	restore_guest_32bit_state
	restore_guest_regs

	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	save_guest_regs
	bl __save_fpsimd
	bl __save_sysregs

	skip_debug_state x3, 1f
	bl	__save_debug
1:
	save_guest_32bit_state

	save_timer_state
	save_vgic_state

	deactivate_traps
	deactivate_vm

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs
	bl __restore_fpsimd

	skip_debug_state x3, 1f
	// Clear the dirty flag for the next run, as all the state has
	// already been saved. Note that we nuke the whole 64bit word.
	// If we ever add more flags, we'll have to be more careful...
	str	xzr, [x0, #VCPU_DEBUG_FLAGS]
	bl	__restore_debug
1:
	restore_host_regs

	mov	x0, x1
	ret
END(__kvm_vcpu_run)

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	dsb	ishst

	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	tlbi	ipas2e1is, x1
	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb	ish
	tlbi	vmalle1is
	dsb	ish
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)

ENTRY(__kvm_flush_vm_context)
	dsb	ishst
	tlbi	alle1is
	ic	ialluis
	dsb	ish
	ret
ENDPROC(__kvm_flush_vm_context)

	// struct vgic_sr_vectors __vgic_sr_vectors;
	.align 3
ENTRY(__vgic_sr_vectors)
	.skip	VGIC_SR_VECTOR_SZ
ENDPROC(__vgic_sr_vectors)

__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	// If zero, then we're already in the host.
	// Otherwise restore a minimal host context before panicking.
	mrs	x0, vttbr_el2
	cbz	x0, 1f

	mrs	x0, tpidr_el2

	deactivate_traps
	deactivate_vm

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs

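	// The panic string below lives in the HYP VA range; convert its
	// address to a kernel VA (subtract HYP_PAGE_OFFSET, add PAGE_OFFSET,
	// both taken from the literals at 2:) so that panic(), running in
	// the kernel, can dereference it.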
1:	adr	x0, __hyp_panic_str
	adr	x1, 2f
	ldp	x2, x3, [x1]
	sub	x0, x0, x2
	add	x0, x0, x3
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2
	mrs	x6, par_el1
	mrs	x7, tpidr_el2

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret

	.align	3
2:	.quad	HYP_PAGE_OFFSET
	.quad	PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"

	.align	2

/*
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C sense, and care
 * must be taken when calling this to ensure parameters are passed in
 * registers only, since the stack will change between the caller and the
 * callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed). The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
 * passed in x0 and x1.
 *
 * A function pointer with a value of 0 has a special meaning, and is
 * used to implement __hyp_get_vectors in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
 */
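/*
 * For illustration only (the callers live on the C side and may differ
 * between kernel versions), a typical use looks like:
 *
 *	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *
 * i.e. the HYP function pointer and up to three arguments are all passed
 * in registers through the "hvc #0" trap handled by el1_sync below.
 */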
ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)

.macro invalid_vector	label, target
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

el1_sync:					// Guest trapped into EL2
	push	x0, x1
	push	x2, x3

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_ELx_EC_SHIFT

	cmp	x2, #ESR_ELx_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2			// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap			// called HVC

	/* Here, we're pretty sure the host called HVC. */
	pop	x2, x3
	pop	x0, x1

	/* Check for __hyp_get_vectors */
	cbnz	x0, 1f
	mrs	x0, vbar_el2
	b	2f

1:	push	lr, xzr

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
	kern_hyp_va	x0
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr

	pop	lr, xzr
2:	eret

el1_trap:
	/*
	 * x1: ESR
	 * x2: ESR_EC
	 */
	cmp	x2, #ESR_ELx_EC_DABT_LOW
	mov	x0, #ESR_ELx_EC_IABT_LOW
	ccmp	x2, x0, #4, ne
	b.ne	1f		// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_ELx_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f		// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f	// S1PTW is set

	/* Preserve PAR_EL1 */
	mrs	x3, par_el1
	push	x3, xzr

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
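	/*
	 * AT S1E1R performs a Stage-1-only translation of the faulting VA
	 * and leaves the resulting address (here the IPA) in PAR_EL1[47:12];
	 * the ubfx/lsl pair below repacks those bits into the HPFAR_EL2
	 * layout (FIPA starting at bit 4) expected by the C fault handler.
	 */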
	mrs	x2, far_el2
	at	s1e1r, x2
	isb

	/* Read result */
	mrs	x3, par_el1
	pop	x0, xzr			// Restore PAR_EL1 from the stack
	msr	par_el1, x0
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
	b	2f

1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	w1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop	x2, x3
	pop	x0, x1

	eret

el1_irq:
	push	x0, x1
	push	x2, x3
	mrs	x0, tpidr_el2
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return

	.ltorg

	.align 11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2h_error_invalid		// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

	.popsection