KVM: PPC: Make SLB switching code use the new segment framework

We just introduced a generic segment switching framework that keeps most
of the entry / exit code common and only calls small macros to do the
actual segment switching.

So let's move the SLB switching code over to use this new mechanism.
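
On 64-bit hosts this turns the SLB switching code into a pair of macros,
LOAD_GUEST_SEGMENTS and LOAD_HOST_SEGMENTS, that the generic trampoline
invokes. A rough sketch of the entry side (the surrounding generic code
is paraphrased here, not part of this patch):

	kvmppc_handler_trampoline_enter:	/* generic, shared entry */
		/* ... save host state, set up guest MSR / PC ... */
		LOAD_GUEST_SEGMENTS		/* this file: switch to the shadow SLB */
		/* ... restore guest GPRs and RFI into the guest ... */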

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 0919679..04e7d3b 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -44,8 +44,7 @@
  *                                                                            *
  *****************************************************************************/
 
-.global kvmppc_handler_trampoline_enter
-kvmppc_handler_trampoline_enter:
+.macro LOAD_GUEST_SEGMENTS
 
 	/* Required state:
 	 *
@@ -53,20 +52,14 @@
 	 * R13 = PACA
 	 * R1 = host R1
 	 * R2 = host R2
-	 * R9 = guest IP
-	 * R10 = guest MSR
-	 * all other GPRS = free
-	 * PACA[KVM_CR] = guest CR
-	 * PACA[KVM_XER] = guest XER
+	 * R3 = shadow vcpu
+	 * all other volatile GPRS = free
+	 * SVCPU[CR]  = guest CR
+	 * SVCPU[XER] = guest XER
+	 * SVCPU[CTR] = guest CTR
+	 * SVCPU[LR]  = guest LR
 	 */
 
-	mtsrr0	r9
-	mtsrr1	r10
-
-	/* Activate guest mode, so faults get handled by KVM */
-	li	r11, KVM_GUEST_MODE_GUEST
-	stb	r11, PACA_KVM_IN_GUEST(r13)
-
 	/* Remove LPAR shadow entries */
 
 #if SLB_NUM_BOLTED == 3
@@ -101,14 +94,14 @@
 
 	/* Fill SLB with our shadow */
 
-	lbz	r12, PACA_KVM_SLB_MAX(r13)
+	lbz	r12, SVCPU_SLB_MAX(r3)
 	mulli	r12, r12, 16
-	addi	r12, r12, PACA_KVM_SLB
-	add	r12, r12, r13
+	addi	r12, r12, SVCPU_SLB
+	add	r12, r12, r3
 
 	/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
-	li	r11, PACA_KVM_SLB
-	add	r11, r11, r13
+	li	r11, SVCPU_SLB
+	add	r11, r11, r3
 
 slb_loop_enter:
 
@@ -127,34 +120,7 @@
 
 slb_do_enter:
 
-	/* Enter guest */
-
-	ld	r0, (PACA_KVM_R0)(r13)
-	ld	r1, (PACA_KVM_R1)(r13)
-	ld	r2, (PACA_KVM_R2)(r13)
-	ld	r3, (PACA_KVM_R3)(r13)
-	ld	r4, (PACA_KVM_R4)(r13)
-	ld	r5, (PACA_KVM_R5)(r13)
-	ld	r6, (PACA_KVM_R6)(r13)
-	ld	r7, (PACA_KVM_R7)(r13)
-	ld	r8, (PACA_KVM_R8)(r13)
-	ld	r9, (PACA_KVM_R9)(r13)
-	ld	r10, (PACA_KVM_R10)(r13)
-	ld	r12, (PACA_KVM_R12)(r13)
-
-	lwz	r11, (PACA_KVM_CR)(r13)
-	mtcr	r11
-
-	lwz	r11, (PACA_KVM_XER)(r13)
-	mtxer	r11
-
-	ld	r11, (PACA_KVM_R11)(r13)
-	ld	r13, (PACA_KVM_R13)(r13)
-
-	RFI
-kvmppc_handler_trampoline_enter_end:
-
-
+.endm
 
 /******************************************************************************
  *                                                                            *
@@ -162,99 +128,22 @@
  *                                                                            *
  *****************************************************************************/
 
-.global kvmppc_handler_trampoline_exit
-kvmppc_handler_trampoline_exit:
+.macro LOAD_HOST_SEGMENTS
 
 	/* Register usage at this point:
 	 *
-	 * SPRG_SCRATCH0     = guest R13
-	 * R12               = exit handler id
-	 * R13               = PACA
-	 * PACA.KVM.SCRATCH0 = guest R12
-	 * PACA.KVM.SCRATCH1 = guest CR
+	 * R1         = host R1
+	 * R2         = host R2
+	 * R12        = exit handler id
+	 * R13        = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
+	 * SVCPU.*    = guest *
+	 * SVCPU[CR]  = guest CR
+	 * SVCPU[XER] = guest XER
+	 * SVCPU[CTR] = guest CTR
+	 * SVCPU[LR]  = guest LR
 	 *
 	 */
 
-	/* Save registers */
-
-	std	r0, PACA_KVM_R0(r13)
-	std	r1, PACA_KVM_R1(r13)
-	std	r2, PACA_KVM_R2(r13)
-	std	r3, PACA_KVM_R3(r13)
-	std	r4, PACA_KVM_R4(r13)
-	std	r5, PACA_KVM_R5(r13)
-	std	r6, PACA_KVM_R6(r13)
-	std	r7, PACA_KVM_R7(r13)
-	std	r8, PACA_KVM_R8(r13)
-	std	r9, PACA_KVM_R9(r13)
-	std	r10, PACA_KVM_R10(r13)
-	std	r11, PACA_KVM_R11(r13)
-
-	/* Restore R1/R2 so we can handle faults */
-	ld	r1, PACA_KVM_HOST_R1(r13)
-	ld	r2, PACA_KVM_HOST_R2(r13)
-
-	/* Save guest PC and MSR in GPRs */
-	mfsrr0	r3
-	mfsrr1	r4
-
-	/* Get scratch'ed off registers */
-	mfspr	r9, SPRN_SPRG_SCRATCH0
-	std	r9, PACA_KVM_R13(r13)
-
-	ld	r8, PACA_KVM_SCRATCH0(r13)
-	std	r8, PACA_KVM_R12(r13)
-
-	lwz	r7, PACA_KVM_SCRATCH1(r13)
-	stw	r7, PACA_KVM_CR(r13)
-
-	/* Save more register state  */
-
-	mfxer	r6
-	stw	r6, PACA_KVM_XER(r13)
-
-	mfdar	r5
-	mfdsisr	r6
-
-	/*
-	 * In order for us to easily get the last instruction,
-	 * we got the #vmexit at, we exploit the fact that the
-	 * virtual layout is still the same here, so we can just
-	 * ld from the guest's PC address
-	 */
-
-	/* We only load the last instruction when it's safe */
-	cmpwi	r12, BOOK3S_INTERRUPT_DATA_STORAGE
-	beq	ld_last_inst
-	cmpwi	r12, BOOK3S_INTERRUPT_PROGRAM
-	beq	ld_last_inst
-
-	b	no_ld_last_inst
-
-ld_last_inst:
-	/* Save off the guest instruction we're at */
-
-	/* Set guest mode to 'jump over instruction' so if lwz faults
-	 * we'll just continue at the next IP. */
-	li	r9, KVM_GUEST_MODE_SKIP
-	stb	r9, PACA_KVM_IN_GUEST(r13)
-
-	/*    1) enable paging for data */
-	mfmsr	r9
-	ori	r11, r9, MSR_DR			/* Enable paging for data */
-	mtmsr	r11
-	/*    2) fetch the instruction */
-	li	r0, KVM_INST_FETCH_FAILED	/* In case lwz faults */
-	lwz	r0, 0(r3)
-	/*    3) disable paging again */
-	mtmsr	r9
-
-no_ld_last_inst:
-
-	/* Unset guest mode */
-	li	r9, KVM_GUEST_MODE_NONE
-	stb	r9, PACA_KVM_IN_GUEST(r13)
-
 	/* Restore bolted entries from the shadow and fix it along the way */
 
 	/* We don't store anything in entry 0, so we don't need to take care of it */
@@ -275,28 +164,4 @@
 
 slb_do_exit:
 
-	/* Register usage at this point:
-	 *
-	 * R0         = guest last inst
-	 * R1         = host R1
-	 * R2         = host R2
-	 * R3         = guest PC
-	 * R4         = guest MSR
-	 * R5         = guest DAR
-	 * R6         = guest DSISR
-	 * R12        = exit handler id
-	 * R13        = PACA
-	 * PACA.KVM.* = guest *
-	 *
-	 */
-
-	/* RFI into the highmem handler */
-	mfmsr	r7
-	ori	r7, r7, MSR_IR|MSR_DR|MSR_RI	/* Enable paging */
-	mtsrr1	r7
-	ld	r8, PACA_KVM_VMHANDLER(r13)	/* Highmem handler address */
-	mtsrr0	r8
-
-	RFI
-kvmppc_handler_trampoline_exit_end:
-
+.endm
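
The exit side is symmetric; a minimal sketch of how the generic exit
trampoline is expected to use the macro above (again, the surrounding
code is paraphrased and not part of this patch):

	kvmppc_handler_trampoline_exit:		/* generic, shared exit */
		/* ... save guest GPRs, fetch last instruction if safe ... */
		LOAD_HOST_SEGMENTS		/* this file: restore bolted host SLB entries */
		/* ... RFI into the highmem exit handler ... */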