/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>
#include <asm/ultravisor-api.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)		\
BEGIN_FTR_SECTION;			\
	extsw	reg, reg;		\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
#define NAPPING_UNSPLIT	3

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			208
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_SHORT_PATH	(SFS-8)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
#define STACK_SLOT_AMR		(SFS-80)
#define STACK_SLOT_UAMOR	(SFS-88)
/* the following is used by the P9 short path */
#define STACK_SLOT_NVGPRS	(SFS-152)	/* 18 gprs */

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI_TO_KERNEL
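	/*
	 * (The rfid above installs SRR1, i.e. the current MSR with the
	 * IR/DR relocation bits cleared, and jumps to SRR0, so we land
	 * at kvmppc_call_hv_entry below running in real mode.  RI is
	 * cleared first so that a machine check in this window is
	 * treated as unrecoverable instead of resuming from a
	 * half-switched context.)
	 */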

kvmppc_call_hv_entry:
BEGIN_FTR_SECTION
	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	46f
	lwz	r4, KVM_SPLIT_DO_SET(r3)
	cmpwi	r4, 0
	beq	46f
	bl	kvmhv_p9_set_lpcr
	nop
46:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	bl	kvmhv_load_host_pmu

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4
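	/*
	 * (HSTATE_DECEXP holds the timebase value at which the host's
	 * decrementer was due to expire, so expiry minus the current
	 * timebase is the count to program back into DEC.)
	 */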

	/* hwthread_req may have been set by cede or the no-vcpu path, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt.  We do that by jumping
	 * to absolute address 0x500.  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back to the
	 * book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI_TO_KERNEL

	/* Virtual-mode return */
.Lvirt_return:
	mtlr	r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
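	/*
	 * (The lwarx/stwcx. loop above is the standard PPC atomic
	 * read-modify-write idiom; in C terms it is an atomic
	 * vcore->napping_threads |= 1 << ptid;)
	 */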
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b
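	/* (as above, atomically: vcore->napping_threads &= ~(1 << ptid);) */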

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	b	kvmhv_switch_to_host

/*
 * We come in here when woken from the Linux offline idle code.
 * Relocation is off.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
_GLOBAL(idle_kvm_start_guest)
	ld	r4,PACAEMERGSP(r13)
	mfcr	r5
	mflr	r0
	std	r1,0(r4)
	std	r5,8(r4)
	std	r0,16(r4)
	subi	r1,r4,STACK_FRAME_OVERHEAD
	SAVE_NVGPRS(r1)
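	/*
	 * (We now have a frame on the emergency stack holding the old
	 * r1 at 0(r4), CR at 8(r4) and LR at 16(r4), plus the saved
	 * non-volatile GPRs; the return path further down unwinds
	 * exactly this layout.)
	 */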

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	li	r0,0
	stb	r0,PACA_FTRACE_ENABLED(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* kvm cede / napping does not come through here */
	lbz	r0,HSTATE_NAPPING(r13)
	twnei	r0,0

	b	1f

kvm_unsplit_wakeup:
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

1:

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
BEGIN_FTR_SECTION
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
FTR_SECTION_ELSE
	/* On P9 we use the split_info for coordinating LPCR changes */
	lwz	r4, KVM_SPLIT_DO_SET(r6)
	cmpwi	r4, 0
	beq	1f
	mr	r3, r6
	bl	kvmhv_p9_set_lpcr
	nop
1:
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting the guest fall through this path.
	 * Before proceeding, check for an HMI interrupt and, if one is
	 * pending, invoke the opal HMI handler.  By now we are sure
	 * that the primary thread on this core/subcore has already done
	 * the partition switch and TB resync, so we are good to call
	 * the opal HMI handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
	/*
	 * At this point we have finished executing in the guest.
	 * We need to wait for hwthread_req to become zero, since
	 * we may not turn on the MMU while hwthread_req is non-zero.
	 * While waiting we also need to check if we get given a vcpu to run.
	 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f

	/*
	 * Jump to idle_return_gpr_loss, which returns to the
	 * idle_kvm_start_guest caller.
	 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	/* set up r3 for return */
	mfspr	r3,SPRN_SRR1
	REST_NVGPRS(r1)
	addi	r1, r1, STACK_FRAME_OVERHEAD
	ld	r0, 16(r1)
	ld	r5, 8(r1)
	ld	r1, 0(r1)
	mtlr	r0
	mtcr	r5
	blr

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lwz	r0, KVM_SPLIT_DO_SET(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_set
	lwz	r0, KVM_SPLIT_DO_RESTORE(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_restore
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

kvmhv_do_set:
	/* Set LPCR, LPIDR etc. on P9 */
	HMT_MEDIUM
	bl	kvmhv_p9_set_lpcr
	nop
	b	kvm_no_guest

kvmhv_do_restore:
	HMT_MEDIUM
	bl	kvmhv_p9_restore_lpcr
	nop
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI can go ignored even though the
	 * subcores have already exited the guest.  The HMI then keeps
	 * waking the secondaries from nap in a loop, and they always go
	 * back to nap since no vcore is assigned to them.  This makes
	 * it impossible for the primary thread to get hold of the
	 * secondary threads, resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lbz	r4, HSTATE_TID(r13)
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, NAPPING_UNSPLIT
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b
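	/*
	 * (VCORE_ENTRY_EXIT packs the per-thread entry map in the low
	 * byte with the exit map above it, so the loop above is, in
	 * effect, atomically:
	 * if (vcore->entry_exit_map >= 0x100) goto secondary_too_late;
	 * else vcore->entry_exit_map |= 1 << ptid;)
	 */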

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f

	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB. */
	mr	r3, r9			/* kvm pointer */
	lhz	r4, PACAPACAINDEX(r13)	/* physical cpu number */
	li	r5, 0			/* nested vcpu pointer */
	bl	kvmppc_check_need_tlb_flush
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	std	r8, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8
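	/*
	 * (Writing TBU40 sets only the upper 40 bits of the timebase
	 * and leaves the low 24 bits ticking, so if those low bits
	 * wrapped between the two mftb reads above, the upper bits
	 * must be bumped by one; the addis of 0x100 to the high half
	 * of r8 adds exactly 2^24.)
	 */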

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
	std	r8, STACK_SLOT_IAMR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	mfspr	r5, SPRN_AMR
	std	r5, STACK_SLOT_AMR(r1)
	mfspr	r6, SPRN_UAMOR
	std	r6, STACK_SLOT_UAMOR(r1)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Branch around the call if both CPU_FTR_TM and
	 * CPU_FTR_P9_TM_HV_ASSIST are off.
	 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_restore_tm_hv
	nop
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Load guest PMU registers; r4 = vcpu pointer here */
	mr	r3, r4
	bl	kvmhv_load_guest_pmu

	/* Load up FP, VMX and VSX registers */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	/*
	 * Handle broken DAWR case by not writing it. This means we
	 * can still store the DAWR register for migration.
	 */
	LOAD_REG_ADDR(r5, dawr_force_enable)
	lbz	r5, 0(r5)
	cmpdi	r5, 0
	beq	1f
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
1:
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	nop
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	lbz	r8, HSTATE_FAKE_SUSPEND(r13)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	rldimi	r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
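	/*
	 * (The run latch is the low bit of CTRL: if the guest last ran
	 * with it clear, read CTRLF, clear the bit with clrrdi and
	 * write it back via CTRLT; otherwise leave it set.)
	 */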
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET_APPL(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon
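	/*
	 * (The timebase ticks at 512MHz on these CPUs, so 512 HDEC
	 * ticks is roughly 1us; entering the guest with less than
	 * that left is not worth the switch overhead.)
	 */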

	/* For hash guest, clear out and reload the SLB */
	ld	r6, VCPU_KVM(r4)
	lbz	r0, KVM_RADIX(r6)
	cmpwi	r0, 0
	bne	9f
	li	r6, 0
	slbmte	r6, r6
	slbia
	ptesync

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on this thread, push VCPU to XIVE */
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	li	r9, TM_QW1_OS
	lwz	r8, VCPU_XIVE_CAM_WORD(r4)
	cmpwi	r8, 0
	beq	no_xive
	li	r7, TM_QW1_OS + TM_WORD2
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	2f
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	cmpldi	cr1, r10, 0
	beq	cr1, no_xive
	eieio
	stdx	r11,r9,r10
	stwx	r8,r7,r10
	b	3f
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr1, r10, 0
	beq	cr1, no_xive
	eieio
	stdcix	r11,r9,r10
	stwcix	r8,r7,r10
3:	li	r9, 1
	stb	r9, VCPU_XIVE_PUSHED(r4)
	eieio

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	li	r0,0
	stb	r0, VCPU_IRQ_PENDING(r4)

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	lbz	r0, VCPU_XIVE_ESC_ON(r4)
	cmpwi	cr1, r0, 0
	beq	cr1, 1f
	li	r9, XIVE_ESB_SET_PQ_01
	beq	4f			/* in real mode? */
	ld	r10, VCPU_XIVE_ESC_VADDR(r4)
	ldx	r0, r10, r9
	b	5f
4:	ld	r10, VCPU_XIVE_ESC_RADDR(r4)
	ldcix	r0, r10, r9
5:	sync
| 995 | /* We have a possible subtle race here: The escalation interrupt might |
| 996 | * have fired and be on its way to the host queue while we mask it, |
| 997 | * and if we unmask it early enough (re-cede right away), there is |
| 998 | * a theoretical possibility that it fires again, thus landing in the
| 999 | * target queue more than once which is a big no-no. |
| 1000 | * |
| 1001 | * Fortunately, solving this is rather easy. If the above load setting |
| 1002 | * PQ to 01 returns a previous value where P is set, then we know the |
| 1003 | * escalation interrupt is somewhere on its way to the host. In that |
| 1004 | * case we simply don't clear the xive_esc_on flag below. It will be |
| 1005 | * eventually cleared by the handler for the escalation interrupt. |
| 1006 | * |
| 1007 | * Then, when doing a cede, we check that flag again before re-enabling |
| 1008 | * the escalation interrupt, and if set, we abort the cede. |
| 1009 | */ |
| 1010 | andi. r0, r0, XIVE_ESB_VAL_P |
| 1011 | bne- 1f |
| 1012 | |
| 1013 | /* Now P is 0, we can clear the flag */ |
| 1014 | li r0, 0 |
| 1015 | stb r0, VCPU_XIVE_ESC_ON(r4) |
| 1016 | 1: |
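| | /*
| | * Rough C sketch of the masking above (esb_load is an illustrative
| | * helper, not a kernel function): the load that sets PQ=01 returns
| | * the old PQ bits, and a set P bit means the escalation interrupt is
| | * already in flight:
| | *
| | *   old = esb_load(esc_esb_addr, XIVE_ESB_SET_PQ_01);
| | *   if (!(old & XIVE_ESB_VAL_P))
| | *           vcpu->arch.xive_esc_on = 0;
| | */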
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1017 | no_xive: |
| 1018 | #endif /* CONFIG_KVM_XICS */ |
| 1019 | |
Paul Mackerras | 95a6432 | 2018-10-08 16:30:55 +1100 | [diff] [blame] | 1020 | li r0, 0 |
| 1021 | stw r0, STACK_SLOT_SHORT_PATH(r1) |
| 1022 | |
Paul Mackerras | df709a2 | 2018-10-08 16:30:52 +1100 | [diff] [blame] | 1023 | deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */ |
Paul Mackerras | f7035ce | 2018-10-08 16:30:50 +1100 | [diff] [blame] | 1024 | /* Check if we can deliver an external or decrementer interrupt now */ |
| 1025 | ld r0, VCPU_PENDING_EXC(r4) |
| 1026 | BEGIN_FTR_SECTION |
| 1027 | /* On POWER9, also check for emulated doorbell interrupt */ |
| 1028 | lbz r3, VCPU_DBELL_REQ(r4) |
| 1029 | or r0, r0, r3 |
| 1030 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
| 1031 | cmpdi r0, 0 |
| 1032 | beq 71f |
| 1033 | mr r3, r4 |
| 1034 | bl kvmppc_guest_entry_inject_int |
| 1035 | ld r4, HSTATE_KVM_VCPU(r13) |
| 1036 | 71: |
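| | /*
| | * Sketch of the check above: if any exception is pending (or, on
| | * POWER9, a doorbell was requested), let the C helper inject it:
| | *
| | *   if (vcpu->arch.pending_exceptions || vcpu->arch.doorbell_request)
| | *           kvmppc_guest_entry_inject_int(vcpu);
| | */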
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1037 | ld r6, VCPU_SRR0(r4) |
| 1038 | ld r7, VCPU_SRR1(r4) |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 1039 | mtspr SPRN_SRR0, r6 |
| 1040 | mtspr SPRN_SRR1, r7 |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1041 | |
Paul Mackerras | 95a6432 | 2018-10-08 16:30:55 +1100 | [diff] [blame] | 1042 | fast_guest_entry_c: |
| 1043 | ld r10, VCPU_PC(r4) |
| 1044 | ld r11, VCPU_MSR(r4) |
Paul Mackerras | 4619ac8 | 2013-04-17 20:31:41 +0000 | [diff] [blame] | 1045 | /* r11 = vcpu->arch.msr & ~MSR_HV */ |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1046 | rldicl r11, r11, 63 - MSR_HV_LG, 1 |
| 1047 | rotldi r11, r11, 1 + MSR_HV_LG |
| 1048 | ori r11, r11, MSR_ME |
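| | /*
| | * The rldicl/rotldi pair rotates the HV bit to the MSB, clears it
| | * via the rotate-and-mask, then rotates it back, avoiding a 64-bit
| | * mask constant. C equivalent (a sketch):
| | *
| | *   r11 = (vcpu->arch.msr & ~MSR_HV) | MSR_ME;
| | */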
| 1049 | |
Paul Mackerras | f7035ce | 2018-10-08 16:30:50 +1100 | [diff] [blame] | 1050 | ld r6, VCPU_CTR(r4) |
| 1051 | ld r7, VCPU_XER(r4) |
| 1052 | mtctr r6 |
| 1053 | mtxer r7 |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1054 | |
Liu Ping Fan | 27025a6 | 2013-11-19 14:12:48 +0800 | [diff] [blame] | 1055 | /* |
| 1056 | * Required state: |
| 1057 | * R4 = vcpu |
| 1058 | * R10 = value for HSRR0
| 1059 | * R11 = value for HSRR1
| 1060 | * R13 = PACA |
| 1061 | */ |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1062 | fast_guest_return: |
Paul Mackerras | 4619ac8 | 2013-04-17 20:31:41 +0000 | [diff] [blame] | 1063 | li r0,0 |
| 1064 | stb r0,VCPU_CEDED(r4) /* cancel cede */ |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1065 | mtspr SPRN_HSRR0,r10 |
| 1066 | mtspr SPRN_HSRR1,r11 |
| 1067 | |
| 1068 | /* Activate guest mode, so faults get handled by KVM */ |
Paul Mackerras | 44a3add | 2013-10-04 21:45:04 +1000 | [diff] [blame] | 1069 | li r9, KVM_GUEST_MODE_GUEST_HV |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1070 | stb r9, HSTATE_IN_GUEST(r13) |
| 1071 | |
Paul Mackerras | b6c295d | 2015-03-28 14:21:02 +1100 | [diff] [blame] | 1072 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
| 1073 | /* Accumulate timing */ |
| 1074 | addi r3, r4, VCPU_TB_GUEST |
| 1075 | bl kvmhv_accumulate_time |
| 1076 | #endif |
| 1077 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1078 | /* Enter guest */ |
| 1079 | |
Paul Mackerras | 0acb911 | 2013-02-04 18:10:51 +0000 | [diff] [blame] | 1080 | BEGIN_FTR_SECTION |
| 1081 | ld r5, VCPU_CFAR(r4) |
| 1082 | mtspr SPRN_CFAR, r5 |
| 1083 | END_FTR_SECTION_IFSET(CPU_FTR_CFAR) |
Paul Mackerras | 4b8473c | 2013-09-20 14:52:39 +1000 | [diff] [blame] | 1084 | BEGIN_FTR_SECTION |
| 1085 | ld r0, VCPU_PPR(r4) |
| 1086 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) |
Paul Mackerras | 0acb911 | 2013-02-04 18:10:51 +0000 | [diff] [blame] | 1087 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1088 | ld r5, VCPU_LR(r4) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1089 | mtlr r5 |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1090 | |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1091 | ld r1, VCPU_GPR(R1)(r4) |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1092 | ld r5, VCPU_GPR(R5)(r4) |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1093 | ld r8, VCPU_GPR(R8)(r4) |
| 1094 | ld r9, VCPU_GPR(R9)(r4) |
| 1095 | ld r10, VCPU_GPR(R10)(r4) |
| 1096 | ld r11, VCPU_GPR(R11)(r4) |
| 1097 | ld r12, VCPU_GPR(R12)(r4) |
| 1098 | ld r13, VCPU_GPR(R13)(r4) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1099 | |
Paul Mackerras | 4b8473c | 2013-09-20 14:52:39 +1000 | [diff] [blame] | 1100 | BEGIN_FTR_SECTION |
| 1101 | mtspr SPRN_PPR, r0 |
| 1102 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) |
Michael Neuling | e001fa7 | 2017-09-15 15:26:14 +1000 | [diff] [blame] | 1103 | |
| 1104 | /* Put a canary value into HDSISR so we can check for it later */
| 1105 | BEGIN_FTR_SECTION |
| 1106 | li r0, 0x7fff |
| 1107 | mtspr SPRN_HDSISR, r0 |
| 1108 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
| 1109 | |
Sukadev Bhattiprolu | 6c85b7bc | 2019-08-22 00:48:38 -0300 | [diff] [blame^] | 1110 | ld r6, VCPU_KVM(r4) |
| 1111 | lbz r7, KVM_SECURE_GUEST(r6) |
| 1112 | cmpdi r7, 0 |
| 1113 | ld r6, VCPU_GPR(R6)(r4) |
| 1114 | ld r7, VCPU_GPR(R7)(r4) |
| 1115 | bne ret_to_ultra |
| 1116 | |
| 1117 | lwz r0, VCPU_CR(r4) |
| 1118 | mtcr r0 |
| 1119 | |
Paul Mackerras | 4b8473c | 2013-09-20 14:52:39 +1000 | [diff] [blame] | 1120 | ld r0, VCPU_GPR(R0)(r4) |
Sukadev Bhattiprolu | 6c85b7bc | 2019-08-22 00:48:38 -0300 | [diff] [blame^] | 1121 | ld r2, VCPU_GPR(R2)(r4) |
| 1122 | ld r3, VCPU_GPR(R3)(r4) |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1123 | ld r4, VCPU_GPR(R4)(r4) |
Nicholas Piggin | 222f20f | 2018-01-10 03:07:15 +1100 | [diff] [blame] | 1124 | HRFI_TO_GUEST |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1125 | b . |
Sukadev Bhattiprolu | 6c85b7bc | 2019-08-22 00:48:38 -0300 | [diff] [blame^] | 1126 | /* |
| 1127 | * Use UV_RETURN ultracall to return control back to the Ultravisor after |
| 1128 | * processing a hypercall or interrupt that was forwarded (a.k.a. reflected)
| 1129 | * to the Hypervisor. |
| 1130 | * |
| 1131 | * All registers have already been loaded, except: |
| 1132 | * R0 = hcall result |
| 1133 | * R2 = SRR1, so UV can detect a synthesized interrupt (if any) |
| 1134 | * R3 = UV_RETURN |
| 1135 | */ |
| 1136 | ret_to_ultra: |
| 1137 | lwz r0, VCPU_CR(r4) |
| 1138 | mtcr r0 |
| 1139 | |
| 1140 | ld r0, VCPU_GPR(R3)(r4) |
| 1141 | mfspr r2, SPRN_SRR1 |
| 1142 | li r3, 0 |
| 1143 | ori r3, r3, UV_RETURN |
| 1144 | ld r4, VCPU_GPR(R4)(r4) |
| 1145 | sc 2 |
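| | /*
| | * sc with LEV=2 ("sc 2") traps to the ultravisor rather than the
| | * hypervisor; with UV_RETURN in r3 it asks the UV to resume the
| | * secure guest.
| | */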
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1146 | |
Paul Mackerras | 95a6432 | 2018-10-08 16:30:55 +1100 | [diff] [blame] | 1147 | /* |
| 1148 | * Enter the guest on a P9 or later system where we have exactly |
| 1149 | * one vcpu per vcore and we don't need to go to real mode |
| 1150 | * (which implies that host and guest are both using radix MMU mode). |
| 1151 | * r3 = vcpu pointer |
| 1152 | * Most SPRs and all the VSRs have been loaded already. |
| 1153 | */ |
| 1154 | _GLOBAL(__kvmhv_vcpu_entry_p9) |
| 1155 | EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9) |
| 1156 | mflr r0 |
| 1157 | std r0, PPC_LR_STKOFF(r1) |
| 1158 | stdu r1, -SFS(r1) |
| 1159 | |
| 1160 | li r0, 1 |
| 1161 | stw r0, STACK_SLOT_SHORT_PATH(r1) |
| 1162 | |
| 1163 | std r3, HSTATE_KVM_VCPU(r13) |
| 1164 | mfcr r4 |
| 1165 | stw r4, SFS+8(r1) |
| 1166 | |
| 1167 | std r1, HSTATE_HOST_R1(r13) |
| 1168 | |
| 1169 | reg = 14 |
| 1170 | .rept 18 |
| 1171 | std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1) |
| 1172 | reg = reg + 1 |
| 1173 | .endr |
| 1174 | |
| 1175 | reg = 14 |
| 1176 | .rept 18 |
| 1177 | ld reg, __VCPU_GPR(reg)(r3) |
| 1178 | reg = reg + 1 |
| 1179 | .endr |
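| | /*
| | * Each .rept block above expands at assembly time into 18
| | * instructions: the first saves host r14-r31 to stack slots, the
| | * second loads the guest's r14-r31 from the vcpu struct.
| | */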
| 1180 | |
| 1181 | mfmsr r10 |
| 1182 | std r10, HSTATE_HOST_MSR(r13) |
| 1183 | |
| 1184 | mr r4, r3 |
| 1185 | b fast_guest_entry_c |
| 1186 | guest_exit_short_path: |
| 1187 | |
| 1188 | li r0, KVM_GUEST_MODE_NONE |
| 1189 | stb r0, HSTATE_IN_GUEST(r13) |
| 1190 | |
| 1191 | reg = 14 |
| 1192 | .rept 18 |
| 1193 | std reg, __VCPU_GPR(reg)(r9) |
| 1194 | reg = reg + 1 |
| 1195 | .endr |
| 1196 | |
| 1197 | reg = 14 |
| 1198 | .rept 18 |
| 1199 | ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1) |
| 1200 | reg = reg + 1 |
| 1201 | .endr |
| 1202 | |
| 1203 | lwz r4, SFS+8(r1) |
| 1204 | mtcr r4 |
| 1205 | |
| 1206 | mr r3, r12 /* trap number */ |
| 1207 | |
| 1208 | addi r1, r1, SFS |
| 1209 | ld r0, PPC_LR_STKOFF(r1) |
| 1210 | mtlr r0 |
| 1211 | |
| 1212 | /* If we are in real mode, do a rfid to get back to the caller */ |
| 1213 | mfmsr r4 |
| 1214 | andi. r5, r4, MSR_IR |
| 1215 | bnelr |
| 1216 | rldicl r5, r4, 64 - MSR_TS_S_LG, 62 /* extract TS field */ |
| 1217 | mtspr SPRN_SRR0, r0 |
| 1218 | ld r10, HSTATE_HOST_MSR(r13) |
| 1219 | rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG |
| 1220 | mtspr SPRN_SRR1, r10 |
| 1221 | RFI_TO_KERNEL |
| 1222 | b . |
| 1223 | |
Paul Mackerras | b6c295d | 2015-03-28 14:21:02 +1100 | [diff] [blame] | 1224 | secondary_too_late: |
Paul Mackerras | 6af27c8 | 2015-03-28 14:21:10 +1100 | [diff] [blame] | 1225 | li r12, 0 |
Paul Mackerras | a8b48a4 | 2018-03-07 22:17:20 +1100 | [diff] [blame] | 1226 | stw r12, STACK_SLOT_TRAP(r1) |
Paul Mackerras | b6c295d | 2015-03-28 14:21:02 +1100 | [diff] [blame] | 1227 | cmpdi r4, 0 |
| 1228 | beq 11f |
Paul Mackerras | 6af27c8 | 2015-03-28 14:21:10 +1100 | [diff] [blame] | 1229 | stw r12, VCPU_TRAP(r4) |
| 1230 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
Paul Mackerras | b6c295d | 2015-03-28 14:21:02 +1100 | [diff] [blame] | 1231 | addi r3, r4, VCPU_TB_RMEXIT |
| 1232 | bl kvmhv_accumulate_time |
Paul Mackerras | 6af27c8 | 2015-03-28 14:21:10 +1100 | [diff] [blame] | 1233 | #endif |
Paul Mackerras | b6c295d | 2015-03-28 14:21:02 +1100 | [diff] [blame] | 1234 | 11: b kvmhv_switch_to_host |
| 1235 | |
Paul Mackerras | b4deba5 | 2015-07-02 20:38:16 +1000 | [diff] [blame] | 1236 | no_switch_exit: |
| 1237 | HMT_MEDIUM |
| 1238 | li r12, 0 |
| 1239 | b 12f |
Paul Mackerras | b6c295d | 2015-03-28 14:21:02 +1100 | [diff] [blame] | 1240 | hdec_soon: |
Paul Mackerras | 6af27c8 | 2015-03-28 14:21:10 +1100 | [diff] [blame] | 1241 | li r12, BOOK3S_INTERRUPT_HV_DECREMENTER |
Paul Mackerras | b4deba5 | 2015-07-02 20:38:16 +1000 | [diff] [blame] | 1242 | 12: stw r12, VCPU_TRAP(r4) |
Paul Mackerras | 6af27c8 | 2015-03-28 14:21:10 +1100 | [diff] [blame] | 1243 | mr r9, r4 |
| 1244 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
Paul Mackerras | b6c295d | 2015-03-28 14:21:02 +1100 | [diff] [blame] | 1245 | addi r3, r4, VCPU_TB_RMEXIT |
| 1246 | bl kvmhv_accumulate_time |
Paul Mackerras | b6c295d | 2015-03-28 14:21:02 +1100 | [diff] [blame] | 1247 | #endif |
Paul Mackerras | 6964e6a | 2018-01-11 14:51:02 +1100 | [diff] [blame] | 1248 | b guest_bypass |
Paul Mackerras | b6c295d | 2015-03-28 14:21:02 +1100 | [diff] [blame] | 1249 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1250 | /****************************************************************************** |
| 1251 | * * |
| 1252 | * Exit code * |
| 1253 | * * |
| 1254 | *****************************************************************************/ |
| 1255 | |
| 1256 | /* |
| 1257 | * We come here from the first-level interrupt handlers. |
| 1258 | */ |
Aneesh Kumar K.V | dd96b2c | 2013-10-07 22:17:55 +0530 | [diff] [blame] | 1259 | .globl kvmppc_interrupt_hv |
| 1260 | kvmppc_interrupt_hv: |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1261 | /* |
| 1262 | * Register contents: |
Nicholas Piggin | d3918e7 | 2016-12-22 04:29:25 +1000 | [diff] [blame] | 1263 | * R12 = (guest CR << 32) | interrupt vector |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1264 | * R13 = PACA |
Nicholas Piggin | d3918e7 | 2016-12-22 04:29:25 +1000 | [diff] [blame] | 1265 | * guest R12 saved in shadow VCPU SCRATCH0 |
Nicholas Piggin | a97a65d | 2017-01-27 14:00:34 +1000 | [diff] [blame] | 1266 | * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1267 | * guest R13 saved in SPRN_SCRATCH0 |
| 1268 | */ |
Nicholas Piggin | a97a65d | 2017-01-27 14:00:34 +1000 | [diff] [blame] | 1269 | std r9, HSTATE_SCRATCH2(r13) |
Paul Mackerras | 44a3add | 2013-10-04 21:45:04 +1000 | [diff] [blame] | 1270 | lbz r9, HSTATE_IN_GUEST(r13) |
| 1271 | cmpwi r9, KVM_GUEST_MODE_HOST_HV |
| 1272 | beq kvmppc_bad_host_intr |
Aneesh Kumar K.V | dd96b2c | 2013-10-07 22:17:55 +0530 | [diff] [blame] | 1273 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE |
| 1274 | cmpwi r9, KVM_GUEST_MODE_GUEST |
Nicholas Piggin | a97a65d | 2017-01-27 14:00:34 +1000 | [diff] [blame] | 1275 | ld r9, HSTATE_SCRATCH2(r13) |
Aneesh Kumar K.V | dd96b2c | 2013-10-07 22:17:55 +0530 | [diff] [blame] | 1276 | beq kvmppc_interrupt_pr |
| 1277 | #endif |
Paul Mackerras | 44a3add | 2013-10-04 21:45:04 +1000 | [diff] [blame] | 1278 | /* We're now back in the host but in guest MMU context */ |
| 1279 | li r9, KVM_GUEST_MODE_HOST_HV |
| 1280 | stb r9, HSTATE_IN_GUEST(r13) |
| 1281 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1282 | ld r9, HSTATE_KVM_VCPU(r13) |
| 1283 | |
| 1284 | /* Save registers */ |
| 1285 | |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1286 | std r0, VCPU_GPR(R0)(r9) |
| 1287 | std r1, VCPU_GPR(R1)(r9) |
| 1288 | std r2, VCPU_GPR(R2)(r9) |
| 1289 | std r3, VCPU_GPR(R3)(r9) |
| 1290 | std r4, VCPU_GPR(R4)(r9) |
| 1291 | std r5, VCPU_GPR(R5)(r9) |
| 1292 | std r6, VCPU_GPR(R6)(r9) |
| 1293 | std r7, VCPU_GPR(R7)(r9) |
| 1294 | std r8, VCPU_GPR(R8)(r9) |
Nicholas Piggin | a97a65d | 2017-01-27 14:00:34 +1000 | [diff] [blame] | 1295 | ld r0, HSTATE_SCRATCH2(r13) |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1296 | std r0, VCPU_GPR(R9)(r9) |
| 1297 | std r10, VCPU_GPR(R10)(r9) |
| 1298 | std r11, VCPU_GPR(R11)(r9) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1299 | ld r3, HSTATE_SCRATCH0(r13) |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1300 | std r3, VCPU_GPR(R12)(r9) |
Nicholas Piggin | d3918e7 | 2016-12-22 04:29:25 +1000 | [diff] [blame] | 1301 | /* CR is in the high half of r12 */ |
| 1302 | srdi r4, r12, 32 |
Paul Mackerras | fd0944b | 2018-10-08 16:30:58 +1100 | [diff] [blame] | 1303 | std r4, VCPU_CR(r9) |
Paul Mackerras | 0acb911 | 2013-02-04 18:10:51 +0000 | [diff] [blame] | 1304 | BEGIN_FTR_SECTION |
| 1305 | ld r3, HSTATE_CFAR(r13) |
| 1306 | std r3, VCPU_CFAR(r9) |
| 1307 | END_FTR_SECTION_IFSET(CPU_FTR_CFAR) |
Paul Mackerras | 4b8473c | 2013-09-20 14:52:39 +1000 | [diff] [blame] | 1308 | BEGIN_FTR_SECTION |
| 1309 | ld r4, HSTATE_PPR(r13) |
| 1310 | std r4, VCPU_PPR(r9) |
| 1311 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1312 | |
| 1313 | /* Restore R1/R2 so we can handle faults */ |
| 1314 | ld r1, HSTATE_HOST_R1(r13) |
| 1315 | ld r2, PACATOC(r13) |
| 1316 | |
| 1317 | mfspr r10, SPRN_SRR0 |
| 1318 | mfspr r11, SPRN_SRR1 |
| 1319 | std r10, VCPU_SRR0(r9) |
| 1320 | std r11, VCPU_SRR1(r9) |
Nicholas Piggin | d3918e7 | 2016-12-22 04:29:25 +1000 | [diff] [blame] | 1321 | /* trap is in the low half of r12, clear CR from the high half */ |
| 1322 | clrldi r12, r12, 32 |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1323 | andi. r0, r12, 2 /* need to read HSRR0/1? */ |
| 1324 | beq 1f |
| 1325 | mfspr r10, SPRN_HSRR0 |
| 1326 | mfspr r11, SPRN_HSRR1 |
| 1327 | clrrdi r12, r12, 2 |
| 1328 | 1: std r10, VCPU_PC(r9) |
| 1329 | std r11, VCPU_MSR(r9) |
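| | /*
| | * The first-level handlers set bit 1 of the trap number for
| | * interrupts delivered via HSRR0/1, which is what the andi. above
| | * tests; the clrrdi then strips it from the saved trap.
| | */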
| 1330 | |
| 1331 | GET_SCRATCH0(r3) |
| 1332 | mflr r4 |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1333 | std r3, VCPU_GPR(R13)(r9) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1334 | std r4, VCPU_LR(r9) |
| 1335 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1336 | stw r12,VCPU_TRAP(r9) |
| 1337 | |
Paul Mackerras | 8b24e69 | 2017-06-26 15:45:51 +1000 | [diff] [blame] | 1338 | /* |
| 1339 | * Now that we have saved away SRR0/1 and HSRR0/1, |
| 1340 | * interrupts are recoverable in principle, so set MSR_RI. |
| 1341 | * This becomes important for relocation-on interrupts from |
| 1342 | * the guest, which we can get in radix mode on POWER9. |
| 1343 | */ |
| 1344 | li r0, MSR_RI |
| 1345 | mtmsrd r0, 1 |
| 1346 | |
Paul Mackerras | b6c295d | 2015-03-28 14:21:02 +1100 | [diff] [blame] | 1347 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
| 1348 | addi r3, r9, VCPU_TB_RMINTR |
| 1349 | mr r4, r9 |
| 1350 | bl kvmhv_accumulate_time |
| 1351 | ld r5, VCPU_GPR(R5)(r9) |
| 1352 | ld r6, VCPU_GPR(R6)(r9) |
| 1353 | ld r7, VCPU_GPR(R7)(r9) |
| 1354 | ld r8, VCPU_GPR(R8)(r9) |
| 1355 | #endif |
| 1356 | |
Paul Mackerras | 4a157d6 | 2014-12-03 13:30:39 +1100 | [diff] [blame] | 1357 | /* Save HEIR (HV emulation assist reg) in emul_inst |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 1358 | * if this is an HEI (HV emulation interrupt, e40) */
| 1359 | li r3,KVM_INST_FETCH_FAILED |
Paul Mackerras | 2bf2760 | 2015-03-20 20:39:40 +1100 | [diff] [blame] | 1360 | stw r3,VCPU_LAST_INST(r9) |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 1361 | cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST |
| 1362 | bne 11f |
| 1363 | mfspr r3,SPRN_HEIR |
Paul Mackerras | 4a157d6 | 2014-12-03 13:30:39 +1100 | [diff] [blame] | 1364 | 11: stw r3,VCPU_HEIR(r9) |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 1365 | |
| 1366 | /* these are volatile across C function calls */ |
Nicholas Piggin | a97a65d | 2017-01-27 14:00:34 +1000 | [diff] [blame] | 1367 | #ifdef CONFIG_RELOCATABLE |
| 1368 | ld r3, HSTATE_SCRATCH1(r13) |
| 1369 | mtctr r3 |
| 1370 | #else |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 1371 | mfctr r3 |
Nicholas Piggin | a97a65d | 2017-01-27 14:00:34 +1000 | [diff] [blame] | 1372 | #endif |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 1373 | mfxer r4 |
| 1374 | std r3, VCPU_CTR(r9) |
Sam bobroff | c63517c | 2015-05-27 09:56:57 +1000 | [diff] [blame] | 1375 | std r4, VCPU_XER(r9) |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 1376 | |
Paul Mackerras | df709a2 | 2018-10-08 16:30:52 +1100 | [diff] [blame] | 1377 | /* Save more register state */ |
| 1378 | mfdar r3 |
| 1379 | mfdsisr r4 |
| 1380 | std r3, VCPU_DAR(r9) |
| 1381 | stw r4, VCPU_DSISR(r9) |
| 1382 | |
| 1383 | /* If this is a page table miss then see if it's theirs or ours */ |
| 1384 | cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE |
| 1385 | beq kvmppc_hdsi |
| 1386 | std r3, VCPU_FAULT_DAR(r9) |
| 1387 | stw r4, VCPU_FAULT_DSISR(r9) |
| 1388 | cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE |
| 1389 | beq kvmppc_hisi |
| 1390 | |
Paul Mackerras | 4bb3c7a | 2018-03-21 21:32:01 +1100 | [diff] [blame] | 1391 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
| 1392 | /* For softpatch interrupt, go off and do TM instruction emulation */ |
| 1393 | cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH |
| 1394 | beq kvmppc_tm_emul |
| 1395 | #endif |
| 1396 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1397 | /* See if this is a leftover HDEC interrupt */ |
| 1398 | cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER |
| 1399 | bne 2f |
| 1400 | mfspr r3,SPRN_HDEC |
Paul Mackerras | a4faf2e | 2017-08-25 19:52:12 +1000 | [diff] [blame] | 1401 | EXTEND_HDEC(r3) |
| 1402 | cmpdi r3,0 |
Paul Mackerras | 1f09c3e | 2015-03-28 14:21:04 +1100 | [diff] [blame] | 1403 | mr r4,r9 |
| 1404 | bge fast_guest_return |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1405 | 2: |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 1406 | /* See if this is an hcall we can handle in real mode */ |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1407 | cmpwi r12,BOOK3S_INTERRUPT_SYSCALL |
| 1408 | beq hcall_try_real_mode |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1409 | |
Paul Mackerras | 66feed6 | 2015-03-28 14:21:12 +1100 | [diff] [blame] | 1410 | /* Hypervisor doorbell - exit only if host IPI flag set */ |
| 1411 | cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL |
| 1412 | bne 3f |
Nicholas Piggin | bd0fdb1 | 2017-03-13 03:03:49 +1000 | [diff] [blame] | 1413 | BEGIN_FTR_SECTION |
| 1414 | PPC_MSGSYNC |
Nicholas Piggin | 2cde371 | 2017-10-10 20:18:28 +1000 | [diff] [blame] | 1415 | lwsync |
Paul Mackerras | 360cae3 | 2018-10-08 16:31:04 +1100 | [diff] [blame] | 1416 | /* always exit if we're running a nested guest */ |
| 1417 | ld r0, VCPU_NESTED(r9) |
| 1418 | cmpdi r0, 0 |
| 1419 | bne guest_exit_cont |
Nicholas Piggin | bd0fdb1 | 2017-03-13 03:03:49 +1000 | [diff] [blame] | 1420 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
Paul Mackerras | 66feed6 | 2015-03-28 14:21:12 +1100 | [diff] [blame] | 1421 | lbz r0, HSTATE_HOST_IPI(r13) |
Gautham R. Shenoy | 06554d9 | 2015-08-07 17:41:20 +0530 | [diff] [blame] | 1422 | cmpwi r0, 0 |
Paul Mackerras | df709a2 | 2018-10-08 16:30:52 +1100 | [diff] [blame] | 1423 | beq maybe_reenter_guest |
Paul Mackerras | 66feed6 | 2015-03-28 14:21:12 +1100 | [diff] [blame] | 1424 | b guest_exit_cont |
| 1425 | 3: |
Paul Mackerras | 769377f | 2017-02-15 14:30:17 +1100 | [diff] [blame] | 1426 | /* If it's a hypervisor facility unavailable interrupt, save HFSCR */ |
| 1427 | cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL |
| 1428 | bne 14f |
| 1429 | mfspr r3, SPRN_HFSCR |
| 1430 | std r3, VCPU_HFSCR(r9) |
| 1431 | b guest_exit_cont |
| 1432 | 14: |
Benjamin Herrenschmidt | 54695c3 | 2013-04-17 20:30:50 +0000 | [diff] [blame] | 1433 | /* External interrupt ? */ |
| 1434 | cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL |
Paul Mackerras | df709a2 | 2018-10-08 16:30:52 +1100 | [diff] [blame] | 1435 | beq kvmppc_guest_external |
Paul Mackerras | 43ff3f6 | 2018-01-11 14:31:43 +1100 | [diff] [blame] | 1436 | /* See if it is a machine check */ |
| 1437 | cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK |
| 1438 | beq machine_check_realmode |
Paul Mackerras | df709a2 | 2018-10-08 16:30:52 +1100 | [diff] [blame] | 1439 | /* Or a hypervisor maintenance interrupt */ |
| 1440 | cmpwi r12, BOOK3S_INTERRUPT_HMI |
| 1441 | beq hmi_realmode |
| 1442 | |
| 1443 | guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ |
| 1444 | |
Paul Mackerras | 43ff3f6 | 2018-01-11 14:31:43 +1100 | [diff] [blame] | 1445 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
| 1446 | addi r3, r9, VCPU_TB_RMEXIT |
| 1447 | mr r4, r9 |
| 1448 | bl kvmhv_accumulate_time |
| 1449 | #endif |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1450 | #ifdef CONFIG_KVM_XICS |
| 1451 | /* We are exiting; pull the VP from the XIVE */
Benjamin Herrenschmidt | 35c2405 | 2018-01-12 13:37:15 +1100 | [diff] [blame] | 1452 | lbz r0, VCPU_XIVE_PUSHED(r9) |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1453 | cmpwi cr0, r0, 0 |
| 1454 | beq 1f |
| 1455 | li r7, TM_SPC_PULL_OS_CTX |
| 1456 | li r6, TM_QW1_OS |
| 1457 | mfmsr r0 |
Benjamin Herrenschmidt | 2662efd | 2018-01-12 13:37:14 +1100 | [diff] [blame] | 1458 | andi. r0, r0, MSR_DR /* in real mode? */ |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1459 | beq 2f |
| 1460 | ld r10, HSTATE_XIVE_TIMA_VIRT(r13) |
| 1461 | cmpldi cr0, r10, 0 |
| 1462 | beq 1f |
| 1463 | /* First load pulls the context; we ignore the value */
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1464 | eieio |
Benjamin Herrenschmidt | ad98dd1 | 2017-10-16 08:37:54 +1100 | [diff] [blame] | 1465 | lwzx r11, r7, r10 |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1466 | /* Second load to recover the context state (Words 0 and 1) */ |
| 1467 | ldx r11, r6, r10 |
| 1468 | b 3f |
| 1469 | 2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13) |
| 1470 | cmpldi cr0, r10, 0 |
| 1471 | beq 1f |
| 1472 | /* First load pulls the context; we ignore the value */
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1473 | eieio |
Benjamin Herrenschmidt | ad98dd1 | 2017-10-16 08:37:54 +1100 | [diff] [blame] | 1474 | lwzcix r11, r7, r10 |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1475 | /* Second load to recover the context state (Words 0 and 1) */ |
| 1476 | ldcix r11, r6, r10 |
| 1477 | 3: std r11, VCPU_XIVE_SAVED_STATE(r9) |
| 1478 | /* Fixup some of the state for the next load */ |
| 1479 | li r10, 0 |
| 1480 | li r0, 0xff |
Benjamin Herrenschmidt | 35c2405 | 2018-01-12 13:37:15 +1100 | [diff] [blame] | 1481 | stb r10, VCPU_XIVE_PUSHED(r9) |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1482 | stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9) |
| 1483 | stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9) |
Benjamin Herrenschmidt | ad98dd1 | 2017-10-16 08:37:54 +1100 | [diff] [blame] | 1484 | eieio |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1485 | 1: |
| 1486 | #endif /* CONFIG_KVM_XICS */ |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1487 | |
Paul Mackerras | 95a6432 | 2018-10-08 16:30:55 +1100 | [diff] [blame] | 1488 | /* If we came in through the P9 short path, go back out to C now */ |
| 1489 | lwz r0, STACK_SLOT_SHORT_PATH(r1) |
| 1490 | cmpwi r0, 0 |
| 1491 | bne guest_exit_short_path |
| 1492 | |
Paul Mackerras | 6964e6a | 2018-01-11 14:51:02 +1100 | [diff] [blame] | 1493 | /* For hash guest, read the guest SLB and save it away */ |
| 1494 | ld r5, VCPU_KVM(r9) |
| 1495 | lbz r0, KVM_RADIX(r5) |
| 1496 | li r5, 0 |
| 1497 | cmpwi r0, 0 |
| 1498 | bne 3f /* for radix, save 0 entries */ |
| 1499 | lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ |
| 1500 | mtctr r0 |
| 1501 | li r6,0 |
| 1502 | addi r7,r9,VCPU_SLB |
| 1503 | 1: slbmfee r8,r6 |
| 1504 | andis. r0,r8,SLB_ESID_V@h |
| 1505 | beq 2f |
| 1506 | add r8,r8,r6 /* put index in */ |
| 1507 | slbmfev r3,r6 |
| 1508 | std r8,VCPU_SLB_E(r7) |
| 1509 | std r3,VCPU_SLB_V(r7) |
| 1510 | addi r7,r7,VCPU_SLB_SIZE |
| 1511 | addi r5,r5,1 |
| 1512 | 2: addi r6,r6,1 |
| 1513 | bdnz 1b |
| 1514 | /* Finally clear out the SLB */ |
| 1515 | li r0,0 |
| 1516 | slbmte r0,r0 |
| 1517 | slbia |
| 1518 | ptesync |
| 1519 | 3: stw r5,VCPU_SLB_MAX(r9) |
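| | /*
| | * Equivalent C sketch of the save loop (illustrative only):
| | *
| | *   for (i = n = 0; i < vcpu->arch.slb_nr; i++) {
| | *           e = asm_slbmfee(i);
| | *           if (!(e & SLB_ESID_V))
| | *                   continue;
| | *           slb[n].orige = e | i;      // index lives in the low bits
| | *           slb[n].origv = asm_slbmfev(i);
| | *           n++;
| | *   }
| | *   vcpu->arch.slb_max = n;
| | */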
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 1520 | |
Paul Mackerras | cda4a14 | 2018-03-22 09:48:54 +1100 | [diff] [blame] | 1521 | /* load host SLB entries */ |
| 1522 | BEGIN_MMU_FTR_SECTION |
| 1523 | b 0f |
| 1524 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) |
| 1525 | ld r8,PACA_SLBSHADOWPTR(r13) |
| 1526 | |
| 1527 | .rept SLB_NUM_BOLTED |
| 1528 | li r3, SLBSHADOW_SAVEAREA |
| 1529 | LDX_BE r5, r8, r3 |
| 1530 | addi r3, r3, 8 |
| 1531 | LDX_BE r6, r8, r3 |
| 1532 | andis. r7,r5,SLB_ESID_V@h |
| 1533 | beq 1f |
| 1534 | slbmte r6,r5 |
| 1535 | 1: addi r8,r8,16 |
| 1536 | .endr |
| 1537 | 0: |
| 1538 | |
Paul Mackerras | 6964e6a | 2018-01-11 14:51:02 +1100 | [diff] [blame] | 1539 | guest_bypass: |
Paul Mackerras | a8b48a4 | 2018-03-07 22:17:20 +1100 | [diff] [blame] | 1540 | stw r12, STACK_SLOT_TRAP(r1) |
Paul Mackerras | 57b8daa | 2018-04-20 22:51:11 +1000 | [diff] [blame] | 1541 | |
| 1542 | /* Save DEC */ |
| 1543 | /* Do this before kvmhv_commence_exit so we know TB is guest TB */ |
| 1544 | ld r3, HSTATE_KVM_VCORE(r13) |
| 1545 | mfspr r5,SPRN_DEC |
| 1546 | mftb r6 |
| 1547 | /* On P9, if the guest has large decr enabled, don't sign extend */ |
| 1548 | BEGIN_FTR_SECTION |
| 1549 | ld r4, VCORE_LPCR(r3) |
| 1550 | andis. r4, r4, LPCR_LD@h |
| 1551 | bne 16f |
| 1552 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
| 1553 | extsw r5,r5 |
| 1554 | 16: add r5,r5,r6 |
| 1555 | /* r5 is a guest timebase value here, convert to host TB */ |
| 1556 | ld r4,VCORE_TB_OFFSET_APPL(r3) |
| 1557 | subf r5,r4,r5 |
| 1558 | std r5,VCPU_DEC_EXPIRES(r9) |
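| | /*
| | * Sketch of the computation above:
| | *
| | *   dec = mfspr(SPRN_DEC);           // sign-extended unless LPCR[LD]
| | *   vcpu->arch.dec_expires = dec + mftb() - vc->tb_offset_applied;
| | */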
| 1559 | |
Paul Mackerras | 6af27c8 | 2015-03-28 14:21:10 +1100 | [diff] [blame] | 1560 | /* Increment exit count, poke other threads to exit */ |
Paul Mackerras | 57b8daa | 2018-04-20 22:51:11 +1000 | [diff] [blame] | 1561 | mr r3, r12 |
Paul Mackerras | 6af27c8 | 2015-03-28 14:21:10 +1100 | [diff] [blame] | 1562 | bl kvmhv_commence_exit |
Paul Mackerras | eddb60f | 2015-03-28 14:21:11 +1100 | [diff] [blame] | 1563 | nop |
| 1564 | ld r9, HSTATE_KVM_VCPU(r13) |
Paul Mackerras | 6af27c8 | 2015-03-28 14:21:10 +1100 | [diff] [blame] | 1565 | |
Paul Mackerras | ec25716 | 2015-06-24 21:18:03 +1000 | [diff] [blame] | 1566 | /* Stop others sending VCPU interrupts to this physical CPU */ |
| 1567 | li r0, -1 |
| 1568 | stw r0, VCPU_CPU(r9) |
| 1569 | stw r0, VCPU_THREAD_CPU(r9) |
| 1570 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1571 | /* Save guest CTRL register, set runlatch to 1 */ |
Paul Mackerras | 6af27c8 | 2015-03-28 14:21:10 +1100 | [diff] [blame] | 1572 | mfspr r6,SPRN_CTRLF |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1573 | stw r6,VCPU_CTRL(r9) |
| 1574 | andi. r0,r6,1 |
| 1575 | bne 4f |
| 1576 | ori r6,r6,1 |
| 1577 | mtspr SPRN_CTRLT,r6 |
| 1578 | 4: |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1579 | /* |
| 1580 | * Save the guest PURR/SPURR |
| 1581 | */ |
| 1582 | mfspr r5,SPRN_PURR |
| 1583 | mfspr r6,SPRN_SPURR |
| 1584 | ld r7,VCPU_PURR(r9) |
| 1585 | ld r8,VCPU_SPURR(r9) |
| 1586 | std r5,VCPU_PURR(r9) |
| 1587 | std r6,VCPU_SPURR(r9) |
| 1588 | subf r5,r7,r5 |
| 1589 | subf r6,r8,r6 |
| 1590 | |
| 1591 | /* |
| 1592 | * Restore host PURR/SPURR and add guest times |
| 1593 | * so that the time in the guest gets accounted. |
| 1594 | */ |
| 1595 | ld r3,HSTATE_PURR(r13) |
| 1596 | ld r4,HSTATE_SPURR(r13) |
| 1597 | add r3,r3,r5 |
| 1598 | add r4,r4,r6 |
| 1599 | mtspr SPRN_PURR,r3 |
| 1600 | mtspr SPRN_SPURR,r4 |
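| | /*
| | * Sketch: the guest-mode PURR/SPURR deltas are folded into the host
| | * values so time spent in the guest is accounted to the host counters:
| | *
| | *   delta = mfspr(SPRN_PURR) - purr_at_entry;
| | *   mtspr(SPRN_PURR, host_purr + delta);     // likewise for SPURR
| | */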
| 1601 | |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1602 | BEGIN_FTR_SECTION |
| 1603 | b 8f |
| 1604 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1605 | /* Save POWER8-specific registers */ |
| 1606 | mfspr r5, SPRN_IAMR |
| 1607 | mfspr r6, SPRN_PSPB |
| 1608 | mfspr r7, SPRN_FSCR |
| 1609 | std r5, VCPU_IAMR(r9) |
| 1610 | stw r6, VCPU_PSPB(r9) |
| 1611 | std r7, VCPU_FSCR(r9) |
| 1612 | mfspr r5, SPRN_IC |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1613 | mfspr r7, SPRN_TAR |
| 1614 | std r5, VCPU_IC(r9) |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1615 | std r7, VCPU_TAR(r9) |
Michael Neuling | 7b49041 | 2014-01-08 21:25:32 +1100 | [diff] [blame] | 1616 | mfspr r8, SPRN_EBBHR |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1617 | std r8, VCPU_EBBHR(r9) |
| 1618 | mfspr r5, SPRN_EBBRR |
| 1619 | mfspr r6, SPRN_BESCR |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1620 | mfspr r7, SPRN_PID |
| 1621 | mfspr r8, SPRN_WORT |
Paul Mackerras | 83677f5 | 2016-11-16 22:33:27 +1100 | [diff] [blame] | 1622 | std r5, VCPU_EBBRR(r9) |
| 1623 | std r6, VCPU_BESCR(r9) |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1624 | stw r7, VCPU_GUEST_PID(r9) |
| 1625 | std r8, VCPU_WORT(r9) |
Paul Mackerras | 83677f5 | 2016-11-16 22:33:27 +1100 | [diff] [blame] | 1626 | BEGIN_FTR_SECTION |
| 1627 | mfspr r5, SPRN_TCSCR |
| 1628 | mfspr r6, SPRN_ACOP |
| 1629 | mfspr r7, SPRN_CSIGR |
| 1630 | mfspr r8, SPRN_TACR |
| 1631 | std r5, VCPU_TCSCR(r9) |
| 1632 | std r6, VCPU_ACOP(r9) |
| 1633 | std r7, VCPU_CSIGR(r9) |
| 1634 | std r8, VCPU_TACR(r9) |
Paul Mackerras | e9cf1e0 | 2016-11-18 13:11:42 +1100 | [diff] [blame] | 1635 | FTR_SECTION_ELSE |
| 1636 | mfspr r5, SPRN_TIDR |
| 1637 | mfspr r6, SPRN_PSSCR |
| 1638 | std r5, VCPU_TID(r9) |
| 1639 | rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */ |
| 1640 | rotldi r6, r6, 60 |
| 1641 | std r6, VCPU_PSSCR(r9) |
Paul Mackerras | 769377f | 2017-02-15 14:30:17 +1100 | [diff] [blame] | 1642 | /* Restore host HFSCR value */ |
| 1643 | ld r7, STACK_SLOT_HFSCR(r1) |
| 1644 | mtspr SPRN_HFSCR, r7 |
Paul Mackerras | e9cf1e0 | 2016-11-18 13:11:42 +1100 | [diff] [blame] | 1645 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) |
Paul Mackerras | ccec445 | 2016-03-05 19:34:39 +1100 | [diff] [blame] | 1646 | /* |
| 1647 | * Restore various registers to 0, where non-zero values |
| 1648 | * set by the guest could disrupt the host. |
| 1649 | */ |
| 1650 | li r0, 0 |
Paul Mackerras | 4c3bb4c | 2017-06-15 15:43:17 +1000 | [diff] [blame] | 1651 | mtspr SPRN_PSPB, r0 |
Paul Mackerras | ccec445 | 2016-03-05 19:34:39 +1100 | [diff] [blame] | 1652 | mtspr SPRN_WORT, r0 |
Paul Mackerras | 83677f5 | 2016-11-16 22:33:27 +1100 | [diff] [blame] | 1653 | BEGIN_FTR_SECTION |
| 1654 | mtspr SPRN_TCSCR, r0 |
Paul Mackerras | ccec445 | 2016-03-05 19:34:39 +1100 | [diff] [blame] | 1655 | /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ |
| 1656 | li r0, 1 |
| 1657 | sldi r0, r0, 31 |
| 1658 | mtspr SPRN_MMCRS, r0 |
Paul Mackerras | 83677f5 | 2016-11-16 22:33:27 +1100 | [diff] [blame] | 1659 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1660 | |
Michael Ellerman | c3c7470c | 2019-02-22 13:22:08 +1100 | [diff] [blame] | 1661 | /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */ |
| 1662 | ld r8, STACK_SLOT_IAMR(r1) |
| 1663 | mtspr SPRN_IAMR, r8 |
| 1664 | |
| 1665 | 8: /* Power7 jumps back in here */ |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1666 | mfspr r5,SPRN_AMR |
| 1667 | mfspr r6,SPRN_UAMOR |
| 1668 | std r5,VCPU_AMR(r9) |
| 1669 | std r6,VCPU_UAMOR(r9) |
Michael Ellerman | c3c7470c | 2019-02-22 13:22:08 +1100 | [diff] [blame] | 1670 | ld r5,STACK_SLOT_AMR(r1) |
| 1671 | ld r6,STACK_SLOT_UAMOR(r1) |
| 1672 | mtspr SPRN_AMR, r5 |
Paul Mackerras | 4c3bb4c | 2017-06-15 15:43:17 +1000 | [diff] [blame] | 1673 | mtspr SPRN_UAMOR, r6 |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1674 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1675 | /* Switch DSCR back to host value */ |
| 1676 | mfspr r8, SPRN_DSCR |
| 1677 | ld r7, HSTATE_DSCR(r13) |
Paul Mackerras | cfc8602 | 2013-09-21 09:53:28 +1000 | [diff] [blame] | 1678 | std r8, VCPU_DSCR(r9) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1679 | mtspr SPRN_DSCR, r7 |
| 1680 | |
| 1681 | /* Save non-volatile GPRs */ |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 1682 | std r14, VCPU_GPR(R14)(r9) |
| 1683 | std r15, VCPU_GPR(R15)(r9) |
| 1684 | std r16, VCPU_GPR(R16)(r9) |
| 1685 | std r17, VCPU_GPR(R17)(r9) |
| 1686 | std r18, VCPU_GPR(R18)(r9) |
| 1687 | std r19, VCPU_GPR(R19)(r9) |
| 1688 | std r20, VCPU_GPR(R20)(r9) |
| 1689 | std r21, VCPU_GPR(R21)(r9) |
| 1690 | std r22, VCPU_GPR(R22)(r9) |
| 1691 | std r23, VCPU_GPR(R23)(r9) |
| 1692 | std r24, VCPU_GPR(R24)(r9) |
| 1693 | std r25, VCPU_GPR(R25)(r9) |
| 1694 | std r26, VCPU_GPR(R26)(r9) |
| 1695 | std r27, VCPU_GPR(R27)(r9) |
| 1696 | std r28, VCPU_GPR(R28)(r9) |
| 1697 | std r29, VCPU_GPR(R29)(r9) |
| 1698 | std r30, VCPU_GPR(R30)(r9) |
| 1699 | std r31, VCPU_GPR(R31)(r9) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1700 | |
| 1701 | /* Save SPRGs */ |
| 1702 | mfspr r3, SPRN_SPRG0 |
| 1703 | mfspr r4, SPRN_SPRG1 |
| 1704 | mfspr r5, SPRN_SPRG2 |
| 1705 | mfspr r6, SPRN_SPRG3 |
| 1706 | std r3, VCPU_SPRG0(r9) |
| 1707 | std r4, VCPU_SPRG1(r9) |
| 1708 | std r5, VCPU_SPRG2(r9) |
| 1709 | std r6, VCPU_SPRG3(r9) |
| 1710 | |
Paul Mackerras | 8943633 | 2012-03-02 01:38:23 +0000 | [diff] [blame] | 1711 | /* save FP state */ |
| 1712 | mr r3, r9 |
Paul Mackerras | 595e4f7 | 2013-10-15 20:43:04 +1100 | [diff] [blame] | 1713 | bl kvmppc_save_fp |
Paul Mackerras | 8943633 | 2012-03-02 01:38:23 +0000 | [diff] [blame] | 1714 | |
Paul Mackerras | 0a8ecce | 2014-04-14 08:56:26 +1000 | [diff] [blame] | 1715 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
Paul Mackerras | 4bb3c7a | 2018-03-21 21:32:01 +1100 | [diff] [blame] | 1716 | /* |
| 1717 | * Branch around the call if both CPU_FTR_TM and |
| 1718 | * CPU_FTR_P9_TM_HV_ASSIST are off. |
| 1719 | */ |
Paul Mackerras | 0a8ecce | 2014-04-14 08:56:26 +1000 | [diff] [blame] | 1720 | BEGIN_FTR_SECTION |
Paul Mackerras | 4bb3c7a | 2018-03-21 21:32:01 +1100 | [diff] [blame] | 1721 | b 91f |
| 1722 | END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) |
Paul Mackerras | 67f8a8c | 2017-09-12 13:47:23 +1000 | [diff] [blame] | 1723 | /* |
Paul Mackerras | 7854f75 | 2018-10-08 16:30:53 +1100 | [diff] [blame] | 1724 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) |
Paul Mackerras | 67f8a8c | 2017-09-12 13:47:23 +1000 | [diff] [blame] | 1725 | */ |
Simon Guo | 6f597c6 | 2018-05-23 15:01:48 +0800 | [diff] [blame] | 1726 | mr r3, r9 |
| 1727 | ld r4, VCPU_MSR(r3) |
Paul Mackerras | 7854f75 | 2018-10-08 16:30:53 +1100 | [diff] [blame] | 1728 | li r5, 0 /* don't preserve non-vol regs */ |
Paul Mackerras | 7b0e827 | 2018-05-30 20:07:52 +1000 | [diff] [blame] | 1729 | bl kvmppc_save_tm_hv |
Paul Mackerras | 7854f75 | 2018-10-08 16:30:53 +1100 | [diff] [blame] | 1730 | nop |
Simon Guo | 6f597c6 | 2018-05-23 15:01:48 +0800 | [diff] [blame] | 1731 | ld r9, HSTATE_KVM_VCPU(r13) |
Paul Mackerras | 4bb3c7a | 2018-03-21 21:32:01 +1100 | [diff] [blame] | 1732 | 91: |
Paul Mackerras | 0a8ecce | 2014-04-14 08:56:26 +1000 | [diff] [blame] | 1733 | #endif |
| 1734 | |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1735 | /* Increment yield count if they have a VPA */ |
| 1736 | ld r8, VCPU_VPA(r9) /* do they have a VPA? */ |
| 1737 | cmpdi r8, 0 |
| 1738 | beq 25f |
Alexander Graf | 0865a58 | 2014-06-11 10:36:17 +0200 | [diff] [blame] | 1739 | li r4, LPPACA_YIELDCOUNT |
| 1740 | LWZX_BE r3, r8, r4 |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1741 | addi r3, r3, 1 |
Alexander Graf | 0865a58 | 2014-06-11 10:36:17 +0200 | [diff] [blame] | 1742 | STWX_BE r3, r8, r4 |
Paul Mackerras | c35635e | 2013-04-18 19:51:04 +0000 | [diff] [blame] | 1743 | li r3, 1 |
| 1744 | stb r3, VCPU_VPA_DIRTY(r9) |
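| | /* The VPA is shared with the guest and big-endian, hence the
| | * LWZX_BE/STWX_BE accessors for the yield count. */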
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1745 | 25: |
| 1746 | /* Save PMU registers if requested */ |
| 1747 | /* r8 and cr0.eq are live here */ |
Paul Mackerras | 41f4e63 | 2018-10-08 16:30:51 +1100 | [diff] [blame] | 1748 | mr r3, r9 |
| 1749 | li r4, 1 |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1750 | beq 21f /* if no VPA, save PMU stuff anyway */ |
Paul Mackerras | 41f4e63 | 2018-10-08 16:30:51 +1100 | [diff] [blame] | 1751 | lbz r4, LPPACA_PMCINUSE(r8) |
| 1752 | 21: bl kvmhv_save_guest_pmu |
| 1753 | ld r9, HSTATE_KVM_VCPU(r13) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1754 | |
Paul Mackerras | e9cf1e0 | 2016-11-18 13:11:42 +1100 | [diff] [blame] | 1755 | /* Restore host values of some registers */ |
| 1756 | BEGIN_FTR_SECTION |
Paul Mackerras | 7ceaa6d | 2017-06-16 11:53:19 +1000 | [diff] [blame] | 1757 | ld r5, STACK_SLOT_CIABR(r1) |
| 1758 | ld r6, STACK_SLOT_DAWR(r1) |
| 1759 | ld r7, STACK_SLOT_DAWRX(r1) |
| 1760 | mtspr SPRN_CIABR, r5 |
Michael Neuling | b53221e | 2018-03-27 15:37:22 +1100 | [diff] [blame] | 1761 | /* |
| 1762 | * If the DAWR doesn't work, it's ok to write these here as |
| 1763 | * the values should always be zero
| 1764 | */ |
Paul Mackerras | 7ceaa6d | 2017-06-16 11:53:19 +1000 | [diff] [blame] | 1765 | mtspr SPRN_DAWR, r6 |
| 1766 | mtspr SPRN_DAWRX, r7 |
| 1767 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
| 1768 | BEGIN_FTR_SECTION |
Paul Mackerras | e9cf1e0 | 2016-11-18 13:11:42 +1100 | [diff] [blame] | 1769 | ld r5, STACK_SLOT_TID(r1) |
| 1770 | ld r6, STACK_SLOT_PSSCR(r1) |
Paul Mackerras | f4c51f8 | 2017-01-30 21:21:45 +1100 | [diff] [blame] | 1771 | ld r7, STACK_SLOT_PID(r1) |
Paul Mackerras | e9cf1e0 | 2016-11-18 13:11:42 +1100 | [diff] [blame] | 1772 | mtspr SPRN_TIDR, r5 |
| 1773 | mtspr SPRN_PSSCR, r6 |
Paul Mackerras | f4c51f8 | 2017-01-30 21:21:45 +1100 | [diff] [blame] | 1774 | mtspr SPRN_PID, r7 |
Paul Mackerras | e9cf1e0 | 2016-11-18 13:11:42 +1100 | [diff] [blame] | 1775 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
Benjamin Herrenschmidt | a25bd72 | 2017-07-24 14:26:06 +1000 | [diff] [blame] | 1776 | |
| 1777 | #ifdef CONFIG_PPC_RADIX_MMU |
| 1778 | /* |
| 1779 | * Are we running hash or radix?
| 1780 | */ |
Paul Mackerras | 67f8a8c | 2017-09-12 13:47:23 +1000 | [diff] [blame] | 1781 | ld r5, VCPU_KVM(r9) |
| 1782 | lbz r0, KVM_RADIX(r5) |
| 1783 | cmpwi cr2, r0, 0 |
Nicholas Piggin | 2bf1071 | 2018-07-05 18:47:00 +1000 | [diff] [blame] | 1784 | beq cr2, 2f |
Benjamin Herrenschmidt | a25bd72 | 2017-07-24 14:26:06 +1000 | [diff] [blame] | 1785 | |
Paul Mackerras | df15818 | 2018-05-17 14:47:59 +1000 | [diff] [blame] | 1786 | /* |
| 1787 | * Radix: do eieio; tlbsync; ptesync sequence in case we |
| 1788 | * interrupted the guest between a tlbie and a ptesync. |
| 1789 | */ |
| 1790 | eieio |
| 1791 | tlbsync |
| 1792 | ptesync |
| 1793 | |
Benjamin Herrenschmidt | a25bd72 | 2017-07-24 14:26:06 +1000 | [diff] [blame] | 1794 | /* Radix: Handle the case where the guest used an illegal PID */ |
| 1795 | LOAD_REG_ADDR(r4, mmu_base_pid) |
| 1796 | lwz r3, VCPU_GUEST_PID(r9) |
| 1797 | lwz r5, 0(r4) |
| 1798 | cmpw cr0,r3,r5 |
| 1799 | blt 2f |
| 1800 | |
| 1801 | /* |
| 1802 | * Illegal PID, the HW might have prefetched and cached in the TLB |
| 1803 | * some translations for the LPID 0 / guest PID combination which |
| 1804 | * Linux doesn't know about, so we need to flush that PID out of |
| 1805 | * the TLB. First we need to set LPIDR to 0 so tlbiel applies to |
| 1806 | * the right context. |
| 1807 | */ |
| 1808 | li r0,0 |
| 1809 | mtspr SPRN_LPID,r0 |
| 1810 | isync |
| 1811 | |
| 1812 | /* Then do a congruence class local flush */ |
| 1813 | ld r6,VCPU_KVM(r9) |
| 1814 | lwz r0,KVM_TLB_SETS(r6) |
| 1815 | mtctr r0 |
| 1816 | li r7,0x400 /* IS field = 0b01 */ |
| 1817 | ptesync |
| 1818 | sldi r0,r3,32 /* RS has PID */ |
| 1819 | 1: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */ |
| 1820 | addi r7,r7,0x1000 |
| 1821 | bdnz 1b |
| 1822 | ptesync |
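| | /*
| | * Sketch of the flush above (illustrative): one tlbiel per TLB
| | * congruence class, RIC=2 (TLB and page-walk cache), PRS=1, R=1,
| | * targeting the guest PID under LPID 0:
| | *
| | *   for (set = 0; set < kvm->arch.tlb_sets; set++)
| | *           tlbiel(IS=1, set, pid, ric=2, prs=1, r=1);
| | */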
| 1823 | |
Nicholas Piggin | 2bf1071 | 2018-07-05 18:47:00 +1000 | [diff] [blame] | 1824 | 2: |
Benjamin Herrenschmidt | a25bd72 | 2017-07-24 14:26:06 +1000 | [diff] [blame] | 1825 | #endif /* CONFIG_PPC_RADIX_MMU */ |
Paul Mackerras | e9cf1e0 | 2016-11-18 13:11:42 +1100 | [diff] [blame] | 1826 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1827 | /* |
Paul Mackerras | c17b98c | 2014-12-03 13:30:38 +1100 | [diff] [blame] | 1828 | * POWER7/POWER8 guest -> host partition switch code. |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1829 | * We don't have to lock against tlbies but we do |
| 1830 | * have to coordinate the hardware threads. |
Paul Mackerras | a8b48a4 | 2018-03-07 22:17:20 +1100 | [diff] [blame] | 1831 | * Here STACK_SLOT_TRAP(r1) contains the trap number. |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1832 | */ |
Paul Mackerras | b6c295d | 2015-03-28 14:21:02 +1100 | [diff] [blame] | 1833 | kvmhv_switch_to_host: |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1834 | /* Secondary threads wait for primary to do partition switch */ |
Paul Mackerras | 6af27c8 | 2015-03-28 14:21:10 +1100 | [diff] [blame] | 1835 | ld r5,HSTATE_KVM_VCORE(r13) |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1836 | ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ |
| 1837 | lbz r3,HSTATE_PTID(r13) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1838 | cmpwi r3,0 |
| 1839 | beq 15f |
| 1840 | HMT_LOW |
| 1841 | 13: lbz r3,VCORE_IN_GUEST(r5) |
| 1842 | cmpwi r3,0 |
| 1843 | bne 13b |
| 1844 | HMT_MEDIUM |
| 1845 | b 16f |
| 1846 | |
| 1847 | /* Primary thread waits for all the secondaries to exit guest */ |
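| | /* VCORE_ENTRY_EXIT packs a map of threads that entered the guest in
| | * its low 8 bits and of threads that need to exit in bits 8-15; the
| | * primary spins here until the two halves match. */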
| 1848 | 15: lwz r3,VCORE_ENTRY_EXIT(r5) |
Paul Mackerras | b4deba5 | 2015-07-02 20:38:16 +1000 | [diff] [blame] | 1849 | rlwinm r0,r3,32-8,0xff |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1850 | clrldi r3,r3,56 |
| 1851 | cmpw r3,r0 |
| 1852 | bne 15b |
| 1853 | isync |
| 1854 | |
Paul Mackerras | b4deba5 | 2015-07-02 20:38:16 +1000 | [diff] [blame] | 1855 | /* Did we actually switch to the guest at all? */ |
| 1856 | lbz r6, VCORE_IN_GUEST(r5) |
| 1857 | cmpwi r6, 0 |
| 1858 | beq 19f |
| 1859 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1860 | /* Primary thread switches back to host partition */ |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1861 | lwz r7,KVM_HOST_LPID(r4) |
Paul Mackerras | 7a84084 | 2016-11-16 22:25:20 +1100 | [diff] [blame] | 1862 | BEGIN_FTR_SECTION |
| 1863 | ld r6,KVM_HOST_SDR1(r4) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1864 | li r8,LPID_RSVD /* switch to reserved LPID */ |
| 1865 | mtspr SPRN_LPID,r8 |
| 1866 | ptesync |
Paul Mackerras | 7a84084 | 2016-11-16 22:25:20 +1100 | [diff] [blame] | 1867 | mtspr SPRN_SDR1,r6 /* switch to host page table */ |
| 1868 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1869 | mtspr SPRN_LPID,r7 |
| 1870 | isync |
| 1871 | |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1872 | BEGIN_FTR_SECTION |
Paul Mackerras | 88b02cf9 | 2016-09-15 13:42:52 +1000 | [diff] [blame] | 1873 | /* DPDES and VTB are shared between threads */ |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1874 | mfspr r7, SPRN_DPDES |
Paul Mackerras | 88b02cf9 | 2016-09-15 13:42:52 +1000 | [diff] [blame] | 1875 | mfspr r8, SPRN_VTB |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1876 | std r7, VCORE_DPDES(r5) |
Paul Mackerras | 88b02cf9 | 2016-09-15 13:42:52 +1000 | [diff] [blame] | 1877 | std r8, VCORE_VTB(r5) |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1878 | /* clear DPDES so we don't get guest doorbells in the host */ |
| 1879 | li r8, 0 |
| 1880 | mtspr SPRN_DPDES, r8 |
| 1881 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
| 1882 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1883 | /* Subtract timebase offset from timebase */ |
Paul Mackerras | 57b8daa | 2018-04-20 22:51:11 +1000 | [diff] [blame] | 1884 | ld r8, VCORE_TB_OFFSET_APPL(r5) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1885 | cmpdi r8,0 |
| 1886 | beq 17f |
Paul Mackerras | 57b8daa | 2018-04-20 22:51:11 +1000 | [diff] [blame] | 1887 | li r0, 0 |
| 1888 | std r0, VCORE_TB_OFFSET_APPL(r5) |
Paul Mackerras | c5fb80d | 2014-03-25 10:47:07 +1100 | [diff] [blame] | 1889 | mftb r6 /* current guest timebase */ |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1890 | subf r8,r8,r6 |
| 1891 | mtspr SPRN_TBU40,r8 /* update upper 40 bits */ |
| 1892 | mftb r7 /* check if lower 24 bits overflowed */ |
| 1893 | clrldi r6,r6,40 |
| 1894 | clrldi r7,r7,40 |
| 1895 | cmpld r7,r6 |
| 1896 | bge 17f |
| 1897 | addis r8,r8,0x100 /* if so, increment upper 40 bits */ |
| 1898 | mtspr SPRN_TBU40,r8 |
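| | /*
| | * TBU40 only sets the upper 40 bits of the timebase; the low 24 bits
| | * keep running. If they wrapped past zero between the mftb and the
| | * mtspr, the re-read low bits compare below the old ones and we bump
| | * the upper 40 bits by one (addis of 0x100 adds 1 << 24).
| | */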
| 1899 | |
Paul Mackerras | df709a2 | 2018-10-08 16:30:52 +1100 | [diff] [blame] | 1900 | 17: |
| 1901 | /* |
| 1902 | * If this is an HMI, we called kvmppc_realmode_hmi_handler |
| 1903 | * above, which may or may not have already called |
| 1904 | * kvmppc_subcore_exit_guest. Fortunately, all that |
| 1905 | * kvmppc_subcore_exit_guest does is clear a flag, so calling |
| 1906 | * it again here is benign even if kvmppc_realmode_hmi_handler |
| 1907 | * has already called it. |
| 1908 | */ |
| 1909 | bl kvmppc_subcore_exit_guest |
Mahesh Salgaonkar | fd7bacb | 2016-05-15 09:44:26 +0530 | [diff] [blame] | 1910 | nop |
| 1911 | 30: ld r5,HSTATE_KVM_VCORE(r13) |
| 1912 | ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ |
| 1913 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1914 | /* Reset PCR */ |
Mahesh Salgaonkar | fd7bacb | 2016-05-15 09:44:26 +0530 | [diff] [blame] | 1915 | ld r0, VCORE_PCR(r5) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1916 | cmpdi r0, 0 |
| 1917 | beq 18f |
| 1918 | li r0, 0 |
| 1919 | mtspr SPRN_PCR, r0 |
| 1920 | 18: |
| 1921 | /* Signal secondary CPUs to continue */ |
| 1922 | stb r0,VCORE_IN_GUEST(r5) |
Paul Mackerras | b4deba5 | 2015-07-02 20:38:16 +1000 | [diff] [blame] | 1923 | 19: lis r8,0x7fff /* MAX_INT@h */ |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1924 | mtspr SPRN_HDEC,r8 |
| 1925 | |
Paul Mackerras | c010150 | 2017-10-19 14:11:23 +1100 | [diff] [blame] | 1926 | 16: |
| 1927 | BEGIN_FTR_SECTION |
| 1928 | /* On POWER9 with HPT-on-radix we need to wait for all other threads */ |
| 1929 | ld r3, HSTATE_SPLIT_MODE(r13) |
| 1930 | cmpdi r3, 0 |
| 1931 | beq 47f |
| 1932 | lwz r8, KVM_SPLIT_DO_RESTORE(r3) |
| 1933 | cmpwi r8, 0 |
| 1934 | beq 47f |
Paul Mackerras | c010150 | 2017-10-19 14:11:23 +1100 | [diff] [blame] | 1935 | bl kvmhv_p9_restore_lpcr |
| 1936 | nop |
Paul Mackerras | c010150 | 2017-10-19 14:11:23 +1100 | [diff] [blame] | 1937 | b 48f |
| 1938 | 47: |
| 1939 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
| 1940 | ld r8,KVM_HOST_LPCR(r4) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1941 | mtspr SPRN_LPCR,r8 |
| 1942 | isync |
Paul Mackerras | c010150 | 2017-10-19 14:11:23 +1100 | [diff] [blame] | 1943 | 48: |
Paul Mackerras | b6c295d | 2015-03-28 14:21:02 +1100 | [diff] [blame] | 1944 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
| 1945 | /* Finish timing, if we have a vcpu */ |
| 1946 | ld r4, HSTATE_KVM_VCPU(r13) |
| 1947 | cmpdi r4, 0 |
| 1948 | li r3, 0 |
| 1949 | beq 2f |
| 1950 | bl kvmhv_accumulate_time |
| 1951 | 2: |
| 1952 | #endif |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1953 | /* Unset guest mode */ |
| 1954 | li r0, KVM_GUEST_MODE_NONE |
| 1955 | stb r0, HSTATE_IN_GUEST(r13) |
| 1956 | |
Paul Mackerras | a8b48a4 | 2018-03-07 22:17:20 +1100 | [diff] [blame] | 1957 | lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */ |
Paul Mackerras | 7ceaa6d | 2017-06-16 11:53:19 +1000 | [diff] [blame] | 1958 | ld r0, SFS+PPC_LR_STKOFF(r1) |
| 1959 | addi r1, r1, SFS |
Paul Mackerras | 218309b | 2013-09-06 13:23:44 +1000 | [diff] [blame] | 1960 | mtlr r0 |
| 1961 | blr |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 1962 | |
Paul Mackerras | df709a2 | 2018-10-08 16:30:52 +1100 | [diff] [blame] | 1963 | kvmppc_guest_external: |
| 1964 | /* External interrupt, first check for host_ipi. If this is |
| 1965 | * set, we know the host wants us out so let's do it now |
| 1966 | */ |
| 1967 | bl kvmppc_read_intr |
| 1968 | |
| 1969 | /* |
| 1970 | * Restore the active volatile registers after returning from |
| 1971 | * a C function. |
| 1972 | */ |
| 1973 | ld r9, HSTATE_KVM_VCPU(r13) |
| 1974 | li r12, BOOK3S_INTERRUPT_EXTERNAL |
| 1975 | |
| 1976 | /* |
| 1977 | * kvmppc_read_intr return codes: |
| 1978 | * |
| 1979 | * Exit to host (r3 > 0) |
| 1980 | * 1 An interrupt is pending that needs to be handled by the host |
| 1981 | * Exit guest and return to host by branching to guest_exit_cont |
| 1982 | * |
| 1983 | * 2 Passthrough that needs completion in the host |
| 1984 | * Exit guest and return to host by branching to guest_exit_cont |
| 1985 | * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD |
| 1986 | * to indicate to the host to complete handling the interrupt |
| 1987 | * |
| 1988 | * Before returning to guest, we check if any CPU is heading out |
| 1989 | * to the host and if so, we head out also. If no CPUs are heading |
| 1990 | * out, fall through to the return value checks (<= 0) below.
| 1991 | * |
| 1992 | * Return to guest (r3 <= 0) |
| 1993 | * 0 No external interrupt is pending |
| 1994 | * -1 A guest wakeup IPI (which has now been cleared) |
| 1995 | * In either case, we return to guest to deliver any pending |
| 1996 | * guest interrupts. |
| 1997 | * |
| 1998 | * -2 A PCI passthrough external interrupt was handled |
| 1999 | * (interrupt was delivered directly to guest) |
| 2000 | * Return to guest to deliver any pending guest interrupts. |
| 2001 | */ |
| 2002 | |
| 2003 | cmpdi r3, 1 |
| 2004 | ble 1f |
| 2005 | |
| 2006 | /* Return code = 2 */ |
| 2007 | li r12, BOOK3S_INTERRUPT_HV_RM_HARD |
| 2008 | stw r12, VCPU_TRAP(r9) |
| 2009 | b guest_exit_cont |
| 2010 | |
| 2011 | 1: /* Return code <= 1 */ |
| 2012 | cmpdi r3, 0 |
| 2013 | bgt guest_exit_cont |
| 2014 | |
| 2015 | /* Return code <= 0 */ |
| 2016 | maybe_reenter_guest: |
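| | /* A VCORE_ENTRY_EXIT value >= 0x100 below means some thread has
| | * already set an exit bit in bits 8-15, so we head out instead of
| | * re-entering the guest. */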
| 2017 | ld r5, HSTATE_KVM_VCORE(r13) |
| 2018 | lwz r0, VCORE_ENTRY_EXIT(r5) |
| 2019 | cmpwi r0, 0x100 |
| 2020 | mr r4, r9 |
| 2021 | blt deliver_guest_interrupt |
| 2022 | b guest_exit_cont |
| 2023 | |
Paul Mackerras | 4bb3c7a | 2018-03-21 21:32:01 +1100 | [diff] [blame] | 2024 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
| 2025 | /* |
| 2026 | * Softpatch interrupt for transactional memory emulation cases |
| 2027 | * on POWER9 DD2.2. This is early in the guest exit path - we |
| 2028 | * haven't saved registers or done a treclaim yet. |
| 2029 | */ |
| 2030 | kvmppc_tm_emul: |
| 2031 | /* Save instruction image in HEIR */ |
| 2032 | mfspr r3, SPRN_HEIR |
| 2033 | stw r3, VCPU_HEIR(r9) |
| 2034 | |
| 2035 | /* |
| 2036 | * The cases we want to handle here are those where the guest |
| 2037 | * is in real suspend mode and is trying to transition to |
| 2038 | * transactional mode. |
| 2039 | */ |
| 2040 | lbz r0, HSTATE_FAKE_SUSPEND(r13) |
| 2041 | cmpwi r0, 0 /* keep exiting guest if in fake suspend */ |
| 2042 | bne guest_exit_cont |
| 2043 | rldicl r3, r11, 64 - MSR_TS_S_LG, 62 |
| 2044 | cmpwi r3, 1 /* or if not in suspend state */ |
| 2045 | bne guest_exit_cont |
| 2046 | |
| 2047 | /* Call C code to do the emulation */ |
| 2048 | mr r3, r9 |
| 2049 | bl kvmhv_p9_tm_emulation_early |
| 2050 | nop |
| 2051 | ld r9, HSTATE_KVM_VCPU(r13) |
| 2052 | li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH |
| 2053 | cmpwi r3, 0 |
| 2054 | beq guest_exit_cont /* continue exiting if not handled */ |
| 2055 | ld r10, VCPU_PC(r9) |
| 2056 | ld r11, VCPU_MSR(r9) |
| 2057 | b fast_interrupt_c_return /* go back to guest if handled */ |
| 2058 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ |
| 2059 | |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2060 | /* |
| 2061 | * Check whether an HDSI is an HPTE not found fault or something else. |
| 2062 | * If it is an HPTE not found fault that is due to the guest accessing |
| 2063 | * a page that they have mapped but which we have paged out, then |
| 2064 | * we continue on with the guest exit path. In all other cases, |
| 2065 | * reflect the HDSI to the guest as a DSI. |
| 2066 | */ |
| 2067 | kvmppc_hdsi: |
Paul Mackerras | f4c51f8 | 2017-01-30 21:21:45 +1100 | [diff] [blame] | 2068 | ld r3, VCPU_KVM(r9) |
| 2069 | lbz r0, KVM_RADIX(r3) |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2070 | mfspr r4, SPRN_HDAR |
| 2071 | mfspr r6, SPRN_HDSISR |
Michael Neuling | e001fa7 | 2017-09-15 15:26:14 +1000 | [diff] [blame] | 2072 | BEGIN_FTR_SECTION |
| 2073 | /* Look for DSISR canary. If we find it, retry instruction */ |
| 2074 | cmpdi r6, 0x7fff |
| 2075 | beq 6f |
| 2076 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
| 2077 | cmpwi r0, 0 |
Paul Mackerras | f4c51f8 | 2017-01-30 21:21:45 +1100 | [diff] [blame] | 2078 | bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */ |
Paul Mackerras | 4cf302b | 2011-12-12 12:38:51 +0000 | [diff] [blame] | 2079 | /* HPTE not found fault or protection fault? */ |
| 2080 | andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2081 | beq 1f /* if not, send it to the guest */ |
Paul Mackerras | 4e5acdc | 2017-02-28 11:05:47 +1100 | [diff] [blame] | 2082 | andi. r0, r11, MSR_DR /* data relocation enabled? */ |
| 2083 | beq 3f |
Paul Mackerras | ef8c640 | 2017-01-30 21:21:43 +1100 | [diff] [blame] | 2084 | BEGIN_FTR_SECTION |
| 2085 | mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */ |
| 2086 | b 4f |
| 2087 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2088 | clrrdi r0, r4, 28 |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 2089 | PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ |
Paul Mackerras | cf29b21 | 2015-10-27 16:10:20 +1100 | [diff] [blame] | 2090 | li r0, BOOK3S_INTERRUPT_DATA_SEGMENT |
| 2091 | bne 7f /* if no SLB entry found */ |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2092 | 4: std r4, VCPU_FAULT_DAR(r9) |
| 2093 | stw r6, VCPU_FAULT_DSISR(r9) |
| 2094 | |
| 2095 | /* Search the hash table. */ |
| 2096 | mr r3, r9 /* vcpu pointer */ |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 2097 | li r7, 1 /* data fault */ |
Anton Blanchard | b1576fe | 2014-02-04 16:04:35 +1100 | [diff] [blame] | 2098 | bl kvmppc_hpte_hv_fault |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2099 | ld r9, HSTATE_KVM_VCPU(r13) |
| 2100 | ld r10, VCPU_PC(r9) |
| 2101 | ld r11, VCPU_MSR(r9) |
| 2102 | li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE |
| 2103 | cmpdi r3, 0 /* retry the instruction */ |
| 2104 | beq 6f |
| 2105 | cmpdi r3, -1 /* handle in kernel mode */ |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2106 | beq guest_exit_cont |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2107 | cmpdi r3, -2 /* MMIO emulation; need instr word */ |
| 2108 | beq 2f |
| 2109 | |
Paul Mackerras | cf29b21 | 2015-10-27 16:10:20 +1100 | [diff] [blame] | 2110 | /* Synthesize a DSI (or DSegI) for the guest */ |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2111 | ld r4, VCPU_FAULT_DAR(r9) |
| 2112 | mr r6, r3 |
Paul Mackerras | cf29b21 | 2015-10-27 16:10:20 +1100 | [diff] [blame] | 2113 | 1: li r0, BOOK3S_INTERRUPT_DATA_STORAGE |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2114 | mtspr SPRN_DSISR, r6 |
Paul Mackerras | cf29b21 | 2015-10-27 16:10:20 +1100 | [diff] [blame] | 2115 | 7: mtspr SPRN_DAR, r4 |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2116 | mtspr SPRN_SRR0, r10 |
| 2117 | mtspr SPRN_SRR1, r11 |
Paul Mackerras | cf29b21 | 2015-10-27 16:10:20 +1100 | [diff] [blame] | 2118 | mr r10, r0 |
Michael Neuling | e4e3812 | 2014-03-25 10:47:02 +1100 | [diff] [blame] | 2119 | bl kvmppc_msr_interrupt |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2120 | fast_interrupt_c_return: |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2121 | 6: ld r7, VCPU_CTR(r9) |
Sam bobroff | c63517c | 2015-05-27 09:56:57 +1000 | [diff] [blame] | 2122 | ld r8, VCPU_XER(r9) |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2123 | mtctr r7 |
| 2124 | mtxer r8 |
| 2125 | mr r4, r9 |
| 2126 | b fast_guest_return |
| 2127 | |
| 2128 | 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */ |
| 2129 | ld r5, KVM_VRMA_SLB_V(r5) |
| 2130 | b 4b |
| 2131 | |
| 2132 | /* If this is for emulated MMIO, load the instruction word */ |
| 2133 | 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */ |
| 2134 | |
| 2135 | /* Set guest mode to 'jump over instruction' so if lwz faults |
| 2136 | * we'll just continue at the next IP. */ |
| 2137 | li r0, KVM_GUEST_MODE_SKIP |
| 2138 | stb r0, HSTATE_IN_GUEST(r13) |
| 2139 | |
| 2140 | /* Do the access with MSR:DR enabled */ |
| 2141 | mfmsr r3 |
| 2142 | ori r4, r3, MSR_DR /* Enable paging for data */ |
| 2143 | mtmsrd r4 |
| 2144 | lwz r8, 0(r10) |
| 2145 | mtmsrd r3 |
| 2146 | |
| 2147 | /* Store the result */ |
| 2148 | stw r8, VCPU_LAST_INST(r9) |
| 2149 | |
| 2150 | /* Unset guest mode. */ |
Paul Mackerras | 44a3add | 2013-10-04 21:45:04 +1000 | [diff] [blame] | 2151 | li r0, KVM_GUEST_MODE_HOST_HV |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2152 | stb r0, HSTATE_IN_GUEST(r13) |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2153 | b guest_exit_cont |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2154 | |
Paul Mackerras | f4c51f8 | 2017-01-30 21:21:45 +1100 | [diff] [blame] | 2155 | .Lradix_hdsi: |
| 2156 | std r4, VCPU_FAULT_DAR(r9) |
| 2157 | stw r6, VCPU_FAULT_DSISR(r9) |
| 2158 | .Lradix_hisi: |
| 2159 | mfspr r5, SPRN_ASDR |
| 2160 | std r5, VCPU_FAULT_GPA(r9) |
| 2161 | b guest_exit_cont |
| 2162 | |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2163 | /* |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 2164 | * Similarly for an HISI, reflect it to the guest as an ISI unless |
| 2165 | * it is an HPTE not found fault for a page that we have paged out. |
| 2166 | */ |
| 2167 | kvmppc_hisi: |
Paul Mackerras | f4c51f8 | 2017-01-30 21:21:45 +1100 | [diff] [blame] | 2168 | ld r3, VCPU_KVM(r9) |
| 2169 | lbz r0, KVM_RADIX(r3) |
| 2170 | cmpwi r0, 0 |
| 2171 | bne .Lradix_hisi /* for radix, just save ASDR */ |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 2172 | andis. r0, r11, SRR1_ISI_NOPT@h |
| 2173 | beq 1f |
Paul Mackerras | 4e5acdc | 2017-02-28 11:05:47 +1100 | [diff] [blame] | 2174 | andi. r0, r11, MSR_IR /* instruction relocation enabled? */ |
| 2175 | beq 3f |
Paul Mackerras | ef8c640 | 2017-01-30 21:21:43 +1100 | [diff] [blame] | 2176 | BEGIN_FTR_SECTION |
| 2177 | mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */ |
| 2178 | b 4f |
| 2179 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 2180 | clrrdi r0, r10, 28 |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 2181 | PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ |
Paul Mackerras | cf29b21 | 2015-10-27 16:10:20 +1100 | [diff] [blame] | 2182 | li r0, BOOK3S_INTERRUPT_INST_SEGMENT |
| 2183 | bne 7f /* if no SLB entry found */ |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 2184 | 4: |
| 2185 | /* Search the hash table. */ |
| 2186 | mr r3, r9 /* vcpu pointer */ |
| 2187 | mr r4, r10 |
| 2188 | mr r6, r11 |
| 2189 | li r7, 0 /* instruction fault */ |
Anton Blanchard | b1576fe | 2014-02-04 16:04:35 +1100 | [diff] [blame] | 2190 | bl kvmppc_hpte_hv_fault |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 2191 | ld r9, HSTATE_KVM_VCPU(r13) |
| 2192 | ld r10, VCPU_PC(r9) |
| 2193 | ld r11, VCPU_MSR(r9) |
| 2194 | li r12, BOOK3S_INTERRUPT_H_INST_STORAGE |
| 2195 | cmpdi r3, 0 /* retry the instruction */ |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2196 | beq fast_interrupt_c_return |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 2197 | cmpdi r3, -1 /* handle in kernel mode */ |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2198 | beq guest_exit_cont |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 2199 | |
Paul Mackerras | cf29b21 | 2015-10-27 16:10:20 +1100 | [diff] [blame] | 2200 | /* Synthesize an ISI (or ISegI) for the guest */ |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 2201 | mr r11, r3 |
Paul Mackerras | cf29b21 | 2015-10-27 16:10:20 +1100 | [diff] [blame] | 2202 | 1: li r0, BOOK3S_INTERRUPT_INST_STORAGE |
| 2203 | 7: mtspr SPRN_SRR0, r10 |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 2204 | mtspr SPRN_SRR1, r11 |
Paul Mackerras | cf29b21 | 2015-10-27 16:10:20 +1100 | [diff] [blame] | 2205 | mr r10, r0 |
Michael Neuling | e4e3812 | 2014-03-25 10:47:02 +1100 | [diff] [blame] | 2206 | bl kvmppc_msr_interrupt |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2207 | b fast_interrupt_c_return |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 2208 | |
| 2209 | 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ |
| 2210 | ld r5, KVM_VRMA_SLB_V(r6) |
| 2211 | b 4b |
| 2212 | |
| 2213 | /* |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2214 | * Try to handle an hcall in real mode. |
| 2215 | * Returns to the guest if we handle it, or continues on up to |
| 2216 | * the kernel if we can't (i.e. if we don't have a handler for |
| 2217 | * it, or if the handler returns H_TOO_HARD). |
Paul Mackerras | 1f09c3e | 2015-03-28 14:21:04 +1100 | [diff] [blame] | 2218 | * |
| 2219 | * r5 - r8 contain hcall args, |
| 2220 | * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2221 | */ |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2222 | hcall_try_real_mode: |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 2223 | ld r3,VCPU_GPR(R3)(r9) |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2224 | andi. r0,r11,MSR_PR |
Liu Ping Fan | 27025a6 | 2013-11-19 14:12:48 +0800 | [diff] [blame] | 2225 | /* sc 1 from userspace - reflect to guest syscall */ |
| 2226 | bne sc_1_fast_return |
Paul Mackerras | 360cae3 | 2018-10-08 16:31:04 +1100 | [diff] [blame] | 2227 | /* sc 1 from nested guest - give it to L1 to handle */ |
| 2228 | ld r0, VCPU_NESTED(r9) |
| 2229 | cmpdi r0, 0 |
| 2230 | bne guest_exit_cont |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2231 | clrrdi r3,r3,2 |
| 2232 | cmpldi r3,hcall_real_table_end - hcall_real_table |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2233 | bge guest_exit_cont |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2234 | /* See if this hcall is enabled for in-kernel handling */ |
| 2235 | ld r4, VCPU_KVM(r9) |
| 2236 | srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */ |
| 2237 | sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */ |
| 2238 | add r4, r4, r0 |
| 2239 | ld r0, KVM_ENABLED_HCALLS(r4) |
| 2240 | rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */ |
| 2241 | srd r0, r0, r4 |
| 2242 | andi. r0, r0, 1 |
| 2243 | beq guest_exit_cont |
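| | /* |
| | * The test above, as a hedged C sketch (enabled_hcalls is an |
| | * array of u64 and hcall numbers are multiples of 4): |
| | * |
| | *	unsigned int idx = hcall / 4; |
| | *	if (!((kvm->arch.enabled_hcalls[idx >> 6] >> (idx & 0x3f)) & 1)) |
| | *		goto guest_exit_cont; |
| | */ |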
| 2244 | /* Get pointer to handler, if any, and call it */ |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2245 | LOAD_REG_ADDR(r4, hcall_real_table) |
Paul Mackerras | 4baa1d8 | 2013-07-08 20:09:53 +1000 | [diff] [blame] | 2246 | lwax r3,r3,r4 |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2247 | cmpwi r3,0 |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2248 | beq guest_exit_cont |
Anton Blanchard | 05a308c | 2014-06-12 18:16:10 +1000 | [diff] [blame] | 2249 | add r12,r3,r4 |
| 2250 | mtctr r12 |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2251 | mr r3,r9 /* get vcpu pointer */ |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 2252 | ld r4,VCPU_GPR(R4)(r9) |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2253 | bctrl |
| 2254 | cmpdi r3,H_TOO_HARD |
| 2255 | beq hcall_real_fallback |
| 2256 | ld r4,HSTATE_KVM_VCPU(r13) |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 2257 | std r3,VCPU_GPR(R3)(r4) |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2258 | ld r10,VCPU_PC(r4) |
| 2259 | ld r11,VCPU_MSR(r4) |
| 2260 | b fast_guest_return |
| 2261 | |
Liu Ping Fan | 27025a6 | 2013-11-19 14:12:48 +0800 | [diff] [blame] | 2262 | sc_1_fast_return: |
| 2263 | mtspr SPRN_SRR0,r10 |
| 2264 | mtspr SPRN_SRR1,r11 |
| 2265 | li r10, BOOK3S_INTERRUPT_SYSCALL |
Michael Neuling | e4e3812 | 2014-03-25 10:47:02 +1100 | [diff] [blame] | 2266 | bl kvmppc_msr_interrupt |
Liu Ping Fan | 27025a6 | 2013-11-19 14:12:48 +0800 | [diff] [blame] | 2267 | mr r4,r9 |
| 2268 | b fast_guest_return |
| 2269 | |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2270 | /* We've attempted a real mode hcall, but the handler has punted it |
| 2271 | * back to userspace. We need to restore some clobbered volatiles |
| 2272 | * before resuming the pass-it-to-qemu path. */ |
| 2273 | hcall_real_fallback: |
| 2274 | li r12,BOOK3S_INTERRUPT_SYSCALL |
| 2275 | ld r9, HSTATE_KVM_VCPU(r13) |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2276 | |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2277 | b guest_exit_cont |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2278 | |
| 2279 | .globl hcall_real_table |
| 2280 | hcall_real_table: |
| 2281 | .long 0 /* 0 - unused */ |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 2282 | .long DOTSYM(kvmppc_h_remove) - hcall_real_table |
| 2283 | .long DOTSYM(kvmppc_h_enter) - hcall_real_table |
| 2284 | .long DOTSYM(kvmppc_h_read) - hcall_real_table |
Paul Mackerras | cdeee51 | 2015-06-24 21:18:07 +1000 | [diff] [blame] | 2285 | .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table |
| 2286 | .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 2287 | .long DOTSYM(kvmppc_h_protect) - hcall_real_table |
Jordan Niethe | e40542a | 2019-02-21 14:28:48 +1100 | [diff] [blame] | 2288 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 2289 | .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table |
Alexey Kardashevskiy | 31217db | 2016-03-18 13:50:42 +1100 | [diff] [blame] | 2290 | .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table |
Jordan Niethe | e40542a | 2019-02-21 14:28:48 +1100 | [diff] [blame] | 2291 | #else |
| 2292 | .long 0 /* 0x1c */ |
| 2293 | .long 0 /* 0x20 */ |
| 2294 | #endif |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2295 | .long 0 /* 0x24 - H_SET_SPRG0 */ |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 2296 | .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table |
Suraj Jitindar Singh | eadfb1c | 2019-03-22 17:05:45 +1100 | [diff] [blame] | 2297 | .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2298 | .long 0 /* 0x30 */ |
| 2299 | .long 0 /* 0x34 */ |
| 2300 | .long 0 /* 0x38 */ |
| 2301 | .long 0 /* 0x3c */ |
| 2302 | .long 0 /* 0x40 */ |
| 2303 | .long 0 /* 0x44 */ |
| 2304 | .long 0 /* 0x48 */ |
| 2305 | .long 0 /* 0x4c */ |
| 2306 | .long 0 /* 0x50 */ |
| 2307 | .long 0 /* 0x54 */ |
| 2308 | .long 0 /* 0x58 */ |
| 2309 | .long 0 /* 0x5c */ |
| 2310 | .long 0 /* 0x60 */ |
Benjamin Herrenschmidt | e7d26f2 | 2013-04-17 20:31:15 +0000 | [diff] [blame] | 2311 | #ifdef CONFIG_KVM_XICS |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 2312 | .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table |
| 2313 | .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table |
| 2314 | .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 2315 | .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 2316 | .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table |
Benjamin Herrenschmidt | e7d26f2 | 2013-04-17 20:31:15 +0000 | [diff] [blame] | 2317 | #else |
| 2318 | .long 0 /* 0x64 - H_EOI */ |
| 2319 | .long 0 /* 0x68 - H_CPPR */ |
| 2320 | .long 0 /* 0x6c - H_IPI */ |
| 2321 | .long 0 /* 0x70 - H_IPOLL */ |
| 2322 | .long 0 /* 0x74 - H_XIRR */ |
| 2323 | #endif |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2324 | .long 0 /* 0x78 */ |
| 2325 | .long 0 /* 0x7c */ |
| 2326 | .long 0 /* 0x80 */ |
| 2327 | .long 0 /* 0x84 */ |
| 2328 | .long 0 /* 0x88 */ |
| 2329 | .long 0 /* 0x8c */ |
| 2330 | .long 0 /* 0x90 */ |
| 2331 | .long 0 /* 0x94 */ |
| 2332 | .long 0 /* 0x98 */ |
| 2333 | .long 0 /* 0x9c */ |
| 2334 | .long 0 /* 0xa0 */ |
| 2335 | .long 0 /* 0xa4 */ |
| 2336 | .long 0 /* 0xa8 */ |
| 2337 | .long 0 /* 0xac */ |
| 2338 | .long 0 /* 0xb0 */ |
| 2339 | .long 0 /* 0xb4 */ |
| 2340 | .long 0 /* 0xb8 */ |
| 2341 | .long 0 /* 0xbc */ |
| 2342 | .long 0 /* 0xc0 */ |
| 2343 | .long 0 /* 0xc4 */ |
| 2344 | .long 0 /* 0xc8 */ |
| 2345 | .long 0 /* 0xcc */ |
| 2346 | .long 0 /* 0xd0 */ |
| 2347 | .long 0 /* 0xd4 */ |
| 2348 | .long 0 /* 0xd8 */ |
| 2349 | .long 0 /* 0xdc */ |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 2350 | .long DOTSYM(kvmppc_h_cede) - hcall_real_table |
Sam Bobroff | 90fd09f | 2014-12-03 13:30:40 +1100 | [diff] [blame] | 2351 | .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2352 | .long 0 /* 0xe8 */ |
| 2353 | .long 0 /* 0xec */ |
| 2354 | .long 0 /* 0xf0 */ |
| 2355 | .long 0 /* 0xf4 */ |
| 2356 | .long 0 /* 0xf8 */ |
| 2357 | .long 0 /* 0xfc */ |
| 2358 | .long 0 /* 0x100 */ |
| 2359 | .long 0 /* 0x104 */ |
| 2360 | .long 0 /* 0x108 */ |
| 2361 | .long 0 /* 0x10c */ |
| 2362 | .long 0 /* 0x110 */ |
| 2363 | .long 0 /* 0x114 */ |
| 2364 | .long 0 /* 0x118 */ |
| 2365 | .long 0 /* 0x11c */ |
| 2366 | .long 0 /* 0x120 */ |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 2367 | .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 2368 | .long 0 /* 0x128 */ |
| 2369 | .long 0 /* 0x12c */ |
| 2370 | .long 0 /* 0x130 */ |
Anton Blanchard | c1fb019 | 2014-02-04 16:07:01 +1100 | [diff] [blame] | 2371 | .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table |
Jordan Niethe | e40542a | 2019-02-21 14:28:48 +1100 | [diff] [blame] | 2372 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
Alexey Kardashevskiy | 31217db | 2016-03-18 13:50:42 +1100 | [diff] [blame] | 2373 | .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table |
Alexey Kardashevskiy | d3695aa | 2016-02-15 12:55:09 +1100 | [diff] [blame] | 2374 | .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table |
Jordan Niethe | e40542a | 2019-02-21 14:28:48 +1100 | [diff] [blame] | 2375 | #else |
| 2376 | .long 0 /* 0x138 */ |
| 2377 | .long 0 /* 0x13c */ |
| 2378 | #endif |
Michael Ellerman | e928e9c | 2015-03-20 20:39:41 +1100 | [diff] [blame] | 2379 | .long 0 /* 0x140 */ |
| 2380 | .long 0 /* 0x144 */ |
| 2381 | .long 0 /* 0x148 */ |
| 2382 | .long 0 /* 0x14c */ |
| 2383 | .long 0 /* 0x150 */ |
| 2384 | .long 0 /* 0x154 */ |
| 2385 | .long 0 /* 0x158 */ |
| 2386 | .long 0 /* 0x15c */ |
| 2387 | .long 0 /* 0x160 */ |
| 2388 | .long 0 /* 0x164 */ |
| 2389 | .long 0 /* 0x168 */ |
| 2390 | .long 0 /* 0x16c */ |
| 2391 | .long 0 /* 0x170 */ |
| 2392 | .long 0 /* 0x174 */ |
| 2393 | .long 0 /* 0x178 */ |
| 2394 | .long 0 /* 0x17c */ |
| 2395 | .long 0 /* 0x180 */ |
| 2396 | .long 0 /* 0x184 */ |
| 2397 | .long 0 /* 0x188 */ |
| 2398 | .long 0 /* 0x18c */ |
| 2399 | .long 0 /* 0x190 */ |
| 2400 | .long 0 /* 0x194 */ |
| 2401 | .long 0 /* 0x198 */ |
| 2402 | .long 0 /* 0x19c */ |
| 2403 | .long 0 /* 0x1a0 */ |
| 2404 | .long 0 /* 0x1a4 */ |
| 2405 | .long 0 /* 0x1a8 */ |
| 2406 | .long 0 /* 0x1ac */ |
| 2407 | .long 0 /* 0x1b0 */ |
| 2408 | .long 0 /* 0x1b4 */ |
| 2409 | .long 0 /* 0x1b8 */ |
| 2410 | .long 0 /* 0x1bc */ |
| 2411 | .long 0 /* 0x1c0 */ |
| 2412 | .long 0 /* 0x1c4 */ |
| 2413 | .long 0 /* 0x1c8 */ |
| 2414 | .long 0 /* 0x1cc */ |
| 2415 | .long 0 /* 0x1d0 */ |
| 2416 | .long 0 /* 0x1d4 */ |
| 2417 | .long 0 /* 0x1d8 */ |
| 2418 | .long 0 /* 0x1dc */ |
| 2419 | .long 0 /* 0x1e0 */ |
| 2420 | .long 0 /* 0x1e4 */ |
| 2421 | .long 0 /* 0x1e8 */ |
| 2422 | .long 0 /* 0x1ec */ |
| 2423 | .long 0 /* 0x1f0 */ |
| 2424 | .long 0 /* 0x1f4 */ |
| 2425 | .long 0 /* 0x1f8 */ |
| 2426 | .long 0 /* 0x1fc */ |
| 2427 | .long 0 /* 0x200 */ |
| 2428 | .long 0 /* 0x204 */ |
| 2429 | .long 0 /* 0x208 */ |
| 2430 | .long 0 /* 0x20c */ |
| 2431 | .long 0 /* 0x210 */ |
| 2432 | .long 0 /* 0x214 */ |
| 2433 | .long 0 /* 0x218 */ |
| 2434 | .long 0 /* 0x21c */ |
| 2435 | .long 0 /* 0x220 */ |
| 2436 | .long 0 /* 0x224 */ |
| 2437 | .long 0 /* 0x228 */ |
| 2438 | .long 0 /* 0x22c */ |
| 2439 | .long 0 /* 0x230 */ |
| 2440 | .long 0 /* 0x234 */ |
| 2441 | .long 0 /* 0x238 */ |
| 2442 | .long 0 /* 0x23c */ |
| 2443 | .long 0 /* 0x240 */ |
| 2444 | .long 0 /* 0x244 */ |
| 2445 | .long 0 /* 0x248 */ |
| 2446 | .long 0 /* 0x24c */ |
| 2447 | .long 0 /* 0x250 */ |
| 2448 | .long 0 /* 0x254 */ |
| 2449 | .long 0 /* 0x258 */ |
| 2450 | .long 0 /* 0x25c */ |
| 2451 | .long 0 /* 0x260 */ |
| 2452 | .long 0 /* 0x264 */ |
| 2453 | .long 0 /* 0x268 */ |
| 2454 | .long 0 /* 0x26c */ |
| 2455 | .long 0 /* 0x270 */ |
| 2456 | .long 0 /* 0x274 */ |
| 2457 | .long 0 /* 0x278 */ |
| 2458 | .long 0 /* 0x27c */ |
| 2459 | .long 0 /* 0x280 */ |
| 2460 | .long 0 /* 0x284 */ |
| 2461 | .long 0 /* 0x288 */ |
| 2462 | .long 0 /* 0x28c */ |
| 2463 | .long 0 /* 0x290 */ |
| 2464 | .long 0 /* 0x294 */ |
| 2465 | .long 0 /* 0x298 */ |
| 2466 | .long 0 /* 0x29c */ |
| 2467 | .long 0 /* 0x2a0 */ |
| 2468 | .long 0 /* 0x2a4 */ |
| 2469 | .long 0 /* 0x2a8 */ |
| 2470 | .long 0 /* 0x2ac */ |
| 2471 | .long 0 /* 0x2b0 */ |
| 2472 | .long 0 /* 0x2b4 */ |
| 2473 | .long 0 /* 0x2b8 */ |
| 2474 | .long 0 /* 0x2bc */ |
| 2475 | .long 0 /* 0x2c0 */ |
| 2476 | .long 0 /* 0x2c4 */ |
| 2477 | .long 0 /* 0x2c8 */ |
| 2478 | .long 0 /* 0x2cc */ |
| 2479 | .long 0 /* 0x2d0 */ |
| 2480 | .long 0 /* 0x2d4 */ |
| 2481 | .long 0 /* 0x2d8 */ |
| 2482 | .long 0 /* 0x2dc */ |
| 2483 | .long 0 /* 0x2e0 */ |
| 2484 | .long 0 /* 0x2e4 */ |
| 2485 | .long 0 /* 0x2e8 */ |
| 2486 | .long 0 /* 0x2ec */ |
| 2487 | .long 0 /* 0x2f0 */ |
| 2488 | .long 0 /* 0x2f4 */ |
| 2489 | .long 0 /* 0x2f8 */ |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 2490 | #ifdef CONFIG_KVM_XICS |
| 2491 | .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table |
| 2492 | #else |
| 2493 | .long 0 /* 0x2fc - H_XIRR_X*/ |
| 2494 | #endif |
Michael Ellerman | e928e9c | 2015-03-20 20:39:41 +1100 | [diff] [blame] | 2495 | .long DOTSYM(kvmppc_h_random) - hcall_real_table |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 2496 | .globl hcall_real_table_end |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2497 | hcall_real_table_end: |
| 2498 | |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 2499 | _GLOBAL(kvmppc_h_set_xdabr) |
Paul Mackerras | 4bad777 | 2018-10-08 16:31:06 +1100 | [diff] [blame] | 2500 | EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr) |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 2501 | andi. r0, r5, DABRX_USER | DABRX_KERNEL |
| 2502 | beq 6f |
| 2503 | li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI |
| 2504 | andc. r0, r5, r0 |
| 2505 | beq 3f |
| 2506 | 6: li r3, H_PARAMETER |
| 2507 | blr |
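| | /* |
| | * The validation above, sketched in C: dabrx (r5) must name at |
| | * least one of user/kernel and no bits outside USER|KERNEL|BTI: |
| | * |
| | *	if (!(dabrx & (DABRX_USER | DABRX_KERNEL)) || |
| | *	    (dabrx & ~(DABRX_USER | DABRX_KERNEL | DABRX_BTI))) |
| | *		return H_PARAMETER; |
| | */ |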
| 2508 | |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2509 | _GLOBAL(kvmppc_h_set_dabr) |
Paul Mackerras | 4bad777 | 2018-10-08 16:31:06 +1100 | [diff] [blame] | 2510 | EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr) |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 2511 | li r5, DABRX_USER | DABRX_KERNEL |
| 2512 | 3: |
Michael Neuling | eee7ff9 | 2014-01-08 21:25:19 +1100 | [diff] [blame] | 2513 | BEGIN_FTR_SECTION |
| 2514 | b 2f |
| 2515 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2516 | std r4,VCPU_DABR(r3) |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 2517 | stw r5, VCPU_DABRX(r3) |
| 2518 | mtspr SPRN_DABRX, r5 |
Paul Mackerras | 8943633 | 2012-03-02 01:38:23 +0000 | [diff] [blame] | 2519 | /* Work around P7 bug where DABR can get corrupted on mtspr */ |
| 2520 | 1: mtspr SPRN_DABR,r4 |
| 2521 | mfspr r5, SPRN_DABR |
| 2522 | cmpd r4, r5 |
| 2523 | bne 1b |
| 2524 | isync |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 2525 | li r3,0 |
| 2526 | blr |
| 2527 | |
Michael Neuling | e8ebedb | 2018-03-27 15:37:21 +1100 | [diff] [blame] | 2528 | 2: |
Michael Neuling | c1fe190 | 2019-04-01 17:03:12 +1100 | [diff] [blame] | 2529 | LOAD_REG_ADDR(r11, dawr_force_enable) |
| 2530 | lbz r11, 0(r11) |
| 2531 | cmpdi r11, 0 |
Michael Neuling | fabb2ef | 2019-06-17 17:16:18 +1000 | [diff] [blame] | 2532 | bne 3f |
Aneesh Kumar K.V | ca9a16c | 2018-03-30 17:27:24 +0530 | [diff] [blame] | 2533 | li r3, H_HARDWARE |
Michael Neuling | fabb2ef | 2019-06-17 17:16:18 +1000 | [diff] [blame] | 2534 | blr |
| 2535 | 3: |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 2536 | /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ |
Michael Neuling | e8ebedb | 2018-03-27 15:37:21 +1100 | [diff] [blame] | 2537 | rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW |
Thomas Huth | 760a736 | 2015-11-20 09:11:45 +0100 | [diff] [blame] | 2538 | rlwimi r5, r4, 2, DAWRX_WT |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 2539 | clrrdi r4, r4, 3 |
| 2540 | std r4, VCPU_DAWR(r3) |
| 2541 | std r5, VCPU_DAWRX(r3) |
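| | /* |
| | * Rough C equivalent of the bit shuffling above (rlwimi rotates |
| | * the low 32 bits left and inserts under the given mask): |
| | * |
| | *	dawrx = (dawrx & ~(DAWRX_DR | DAWRX_DW)) | |
| | *		(rotl32(dabr, 5) & (DAWRX_DR | DAWRX_DW)); |
| | *	dawrx = (dawrx & ~DAWRX_WT) | (rotl32(dabr, 2) & DAWRX_WT); |
| | *	dawr  = dabr & ~7ul;		// clrrdi r4, r4, 3 |
| | */ |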
Suraj Jitindar Singh | 84b0282 | 2019-06-17 17:16:19 +1000 | [diff] [blame] | 2542 | /* |
| 2543 | * If we came in through the real mode hcall handler then it is necessary |
| 2544 | * to write the registers since the return path won't. Otherwise it is |
| 2545 | * sufficient to store them in the vcpu struct as they will be loaded |
| 2546 | * next time the vcpu is run. |
| 2547 | */ |
| 2548 | mfmsr r6 |
| 2549 | andi. r6, r6, MSR_DR /* in real mode? */ |
| 2550 | bne 4f |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 2551 | mtspr SPRN_DAWR, r4 |
| 2552 | mtspr SPRN_DAWRX, r5 |
Suraj Jitindar Singh | 84b0282 | 2019-06-17 17:16:19 +1000 | [diff] [blame] | 2553 | 4: li r3, 0 |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2554 | blr |
| 2555 | |
Paul Mackerras | 1f09c3e | 2015-03-28 14:21:04 +1100 | [diff] [blame] | 2556 | _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */ |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2557 | ori r11,r11,MSR_EE |
| 2558 | std r11,VCPU_MSR(r3) |
| 2559 | li r0,1 |
| 2560 | stb r0,VCPU_CEDED(r3) |
| 2561 | sync /* order setting ceded vs. testing prodded */ |
| 2562 | lbz r5,VCPU_PRODDED(r3) |
| 2563 | cmpwi r5,0 |
Paul Mackerras | 04f995a | 2012-08-06 00:03:28 +0000 | [diff] [blame] | 2564 | bne kvm_cede_prodded |
Paul Mackerras | 6af27c8 | 2015-03-28 14:21:10 +1100 | [diff] [blame] | 2565 | li r12,0 /* set trap to 0 to say hcall is handled */ |
| 2566 | stw r12,VCPU_TRAP(r3) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2567 | li r0,H_SUCCESS |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 2568 | std r0,VCPU_GPR(R3)(r3) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2569 | |
| 2570 | /* |
| 2571 | * Set our bit in the bitmask of napping threads unless all the |
| 2572 | * other threads are already napping, in which case we send this |
| 2573 | * up to the host. |
| 2574 | */ |
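| | /* |
| | * A C sketch of the atomic update below; the lwarx/stwcx. pair |
| | * is the usual PPC load-reserve/store-conditional loop: |
| | * |
| | *	do { |
| | *		nap = vcore->napping_threads; |
| | *		nap |= 1u << ptid; |
| | *		if (nap == entry_map)	// every entered thread naps |
| | *			goto kvm_cede_exit; |
| | *	} while (!store_conditional(&vcore->napping_threads, nap)); |
| | */ |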
| 2575 | ld r5,HSTATE_KVM_VCORE(r13) |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 2576 | lbz r6,HSTATE_PTID(r13) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2577 | lwz r8,VCORE_ENTRY_EXIT(r5) |
| 2578 | clrldi r8,r8,56 |
| 2579 | li r0,1 |
| 2580 | sld r0,r0,r6 |
| 2581 | addi r6,r5,VCORE_NAPPING_THREADS |
| 2582 | 31: lwarx r4,0,r6 |
| 2583 | or r4,r4,r0 |
Paul Mackerras | 7d6c40d | 2015-03-28 14:21:09 +1100 | [diff] [blame] | 2584 | cmpw r4,r8 |
| 2585 | beq kvm_cede_exit |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2586 | stwcx. r4,0,r6 |
| 2587 | bne 31b |
Paul Mackerras | 7d6c40d | 2015-03-28 14:21:09 +1100 | [diff] [blame] | 2588 | /* order napping_threads update vs testing entry_exit_map */ |
Paul Mackerras | f019b7a | 2013-11-16 17:46:03 +1100 | [diff] [blame] | 2589 | isync |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 2590 | li r0,NAPPING_CEDE |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2591 | stb r0,HSTATE_NAPPING(r13) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2592 | lwz r7,VCORE_ENTRY_EXIT(r5) |
| 2593 | cmpwi r7,0x100 |
| 2594 | bge 33f /* another thread already exiting */ |
| 2595 | |
| 2596 | /* |
| 2597 | * Although not specifically required by the architecture, POWER7 |
| 2598 | * preserves the following registers in nap mode, even if an SMT mode |
| 2599 | * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3, |
| 2600 | * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. |
| 2601 | */ |
| 2602 | /* Save non-volatile GPRs */ |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 2603 | std r14, VCPU_GPR(R14)(r3) |
| 2604 | std r15, VCPU_GPR(R15)(r3) |
| 2605 | std r16, VCPU_GPR(R16)(r3) |
| 2606 | std r17, VCPU_GPR(R17)(r3) |
| 2607 | std r18, VCPU_GPR(R18)(r3) |
| 2608 | std r19, VCPU_GPR(R19)(r3) |
| 2609 | std r20, VCPU_GPR(R20)(r3) |
| 2610 | std r21, VCPU_GPR(R21)(r3) |
| 2611 | std r22, VCPU_GPR(R22)(r3) |
| 2612 | std r23, VCPU_GPR(R23)(r3) |
| 2613 | std r24, VCPU_GPR(R24)(r3) |
| 2614 | std r25, VCPU_GPR(R25)(r3) |
| 2615 | std r26, VCPU_GPR(R26)(r3) |
| 2616 | std r27, VCPU_GPR(R27)(r3) |
| 2617 | std r28, VCPU_GPR(R28)(r3) |
| 2618 | std r29, VCPU_GPR(R29)(r3) |
| 2619 | std r30, VCPU_GPR(R30)(r3) |
| 2620 | std r31, VCPU_GPR(R31)(r3) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2621 | |
| 2622 | /* save FP state */ |
Paul Mackerras | 595e4f7 | 2013-10-15 20:43:04 +1100 | [diff] [blame] | 2623 | bl kvmppc_save_fp |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2624 | |
Paul Mackerras | 93d1739 | 2016-06-22 15:52:55 +1000 | [diff] [blame] | 2625 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
Paul Mackerras | 4bb3c7a | 2018-03-21 21:32:01 +1100 | [diff] [blame] | 2626 | /* |
| 2627 | * Branch around the call if both CPU_FTR_TM and |
| 2628 | * CPU_FTR_P9_TM_HV_ASSIST are off. |
| 2629 | */ |
Paul Mackerras | 93d1739 | 2016-06-22 15:52:55 +1000 | [diff] [blame] | 2630 | BEGIN_FTR_SECTION |
Paul Mackerras | 4bb3c7a | 2018-03-21 21:32:01 +1100 | [diff] [blame] | 2631 | b 91f |
| 2632 | END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) |
Paul Mackerras | 67f8a8c | 2017-09-12 13:47:23 +1000 | [diff] [blame] | 2633 | /* |
Paul Mackerras | 7854f75 | 2018-10-08 16:30:53 +1100 | [diff] [blame] | 2634 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) |
Paul Mackerras | 67f8a8c | 2017-09-12 13:47:23 +1000 | [diff] [blame] | 2635 | */ |
Simon Guo | 6f597c6 | 2018-05-23 15:01:48 +0800 | [diff] [blame] | 2636 | ld r3, HSTATE_KVM_VCPU(r13) |
| 2637 | ld r4, VCPU_MSR(r3) |
Paul Mackerras | 7854f75 | 2018-10-08 16:30:53 +1100 | [diff] [blame] | 2638 | li r5, 0 /* don't preserve non-vol regs */ |
Paul Mackerras | 7b0e827 | 2018-05-30 20:07:52 +1000 | [diff] [blame] | 2639 | bl kvmppc_save_tm_hv |
Paul Mackerras | 7854f75 | 2018-10-08 16:30:53 +1100 | [diff] [blame] | 2640 | nop |
Paul Mackerras | 4bb3c7a | 2018-03-21 21:32:01 +1100 | [diff] [blame] | 2641 | 91: |
Paul Mackerras | 93d1739 | 2016-06-22 15:52:55 +1000 | [diff] [blame] | 2642 | #endif |
| 2643 | |
Paul Mackerras | fd6d53b | 2015-03-28 14:21:08 +1100 | [diff] [blame] | 2644 | /* |
| 2645 | * Set DEC to the smaller of DEC and HDEC, so that we wake |
| 2646 | * no later than the end of our timeslice (HDEC interrupts |
| 2647 | * don't wake us from nap). |
| 2648 | */ |
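| | /* i.e., roughly: dec = min(dec, hdec), after extending both to |
| | * the decrementer width in use (large decrementer on POWER9). */ |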
| 2649 | mfspr r3, SPRN_DEC |
| 2650 | mfspr r4, SPRN_HDEC |
| 2651 | mftb r5 |
Paul Mackerras | 1bc3fe8 | 2017-05-22 16:55:16 +1000 | [diff] [blame] | 2652 | BEGIN_FTR_SECTION |
| 2653 | /* On P9 check whether the guest has large decrementer mode enabled */ |
| 2654 | ld r6, HSTATE_KVM_VCORE(r13) |
| 2655 | ld r6, VCORE_LPCR(r6) |
| 2656 | andis. r6, r6, LPCR_LD@h |
| 2657 | bne 68f |
| 2658 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
Paul Mackerras | 2f27246 | 2017-05-22 16:25:14 +1000 | [diff] [blame] | 2659 | extsw r3, r3 |
Paul Mackerras | 1bc3fe8 | 2017-05-22 16:55:16 +1000 | [diff] [blame] | 2660 | 68: EXTEND_HDEC(r4) |
Paul Mackerras | 2f27246 | 2017-05-22 16:25:14 +1000 | [diff] [blame] | 2661 | cmpd r3, r4 |
Paul Mackerras | fd6d53b | 2015-03-28 14:21:08 +1100 | [diff] [blame] | 2662 | ble 67f |
| 2663 | mtspr SPRN_DEC, r4 |
| 2664 | 67: |
| 2665 | /* save expiry time of guest decrementer */ |
Paul Mackerras | fd6d53b | 2015-03-28 14:21:08 +1100 | [diff] [blame] | 2666 | add r3, r3, r5 |
| 2667 | ld r4, HSTATE_KVM_VCPU(r13) |
| 2668 | ld r5, HSTATE_KVM_VCORE(r13) |
Paul Mackerras | 57b8daa | 2018-04-20 22:51:11 +1000 | [diff] [blame] | 2669 | ld r6, VCORE_TB_OFFSET_APPL(r5) |
Paul Mackerras | fd6d53b | 2015-03-28 14:21:08 +1100 | [diff] [blame] | 2670 | subf r3, r6, r3 /* convert to host TB value */ |
| 2671 | std r3, VCPU_DEC_EXPIRES(r4) |
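| | /* i.e., roughly: vcpu->arch.dec_expires = dec + tb - tb_offset, |
| | * converting the guest timebase deadline to a host TB value. */ |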
| 2672 | |
Paul Mackerras | b6c295d | 2015-03-28 14:21:02 +1100 | [diff] [blame] | 2673 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
| 2674 | ld r4, HSTATE_KVM_VCPU(r13) |
| 2675 | addi r3, r4, VCPU_TB_CEDE |
| 2676 | bl kvmhv_accumulate_time |
| 2677 | #endif |
| 2678 | |
Paul Mackerras | ccc0777 | 2015-03-28 14:21:07 +1100 | [diff] [blame] | 2679 | lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */ |
| 2680 | |
Nicholas Piggin | 10d9161 | 2019-04-13 00:30:52 +1000 | [diff] [blame] | 2681 | /* Go back to host stack */ |
| 2682 | ld r1, HSTATE_HOST_R1(r13) |
| 2683 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2684 | /* |
Paul Mackerras | aa31e84 | 2014-01-08 21:25:26 +1100 | [diff] [blame] | 2685 | * Take a nap until a decrementer or external or doorbell interrupt |
Paul Mackerras | ccc0777 | 2015-03-28 14:21:07 +1100 | [diff] [blame] | 2686 | * occurs, with PECE1 and PECE0 set in LPCR. |
Paul Mackerras | 66feed6 | 2015-03-28 14:21:12 +1100 | [diff] [blame] | 2687 | * On POWER8, set PECEDH, and if we are ceding, also set PECEDP. |
Paul Mackerras | ccc0777 | 2015-03-28 14:21:07 +1100 | [diff] [blame] | 2688 | * Also clear the runlatch bit before napping. |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2689 | */ |
Paul Mackerras | 56548fc | 2014-12-03 14:48:40 +1100 | [diff] [blame] | 2690 | kvm_do_nap: |
Paul Mackerras | 1f09c3e | 2015-03-28 14:21:04 +1100 | [diff] [blame] | 2691 | mfspr r0, SPRN_CTRLF |
| 2692 | clrrdi r0, r0, 1 |
| 2693 | mtspr SPRN_CTRLT, r0 |
Preeti U Murthy | 582b910 | 2014-04-11 16:02:08 +0530 | [diff] [blame] | 2694 | |
Paul Mackerras | f0888f7 | 2012-02-03 00:54:17 +0000 | [diff] [blame] | 2695 | li r0,1 |
| 2696 | stb r0,HSTATE_HWTHREAD_REQ(r13) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2697 | mfspr r5,SPRN_LPCR |
| 2698 | ori r5,r5,LPCR_PECE0 | LPCR_PECE1 |
Paul Mackerras | aa31e84 | 2014-01-08 21:25:26 +1100 | [diff] [blame] | 2699 | BEGIN_FTR_SECTION |
Paul Mackerras | 66feed6 | 2015-03-28 14:21:12 +1100 | [diff] [blame] | 2700 | ori r5, r5, LPCR_PECEDH |
Paul Mackerras | ccc0777 | 2015-03-28 14:21:07 +1100 | [diff] [blame] | 2701 | rlwimi r5, r3, 0, LPCR_PECEDP |
Paul Mackerras | aa31e84 | 2014-01-08 21:25:26 +1100 | [diff] [blame] | 2702 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
Paul Mackerras | bf53c88 | 2016-11-18 14:34:07 +1100 | [diff] [blame] | 2703 | |
| 2704 | kvm_nap_sequence: /* desired LPCR value in r5 */ |
| 2705 | BEGIN_FTR_SECTION |
| 2706 | /* |
| 2707 | * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset) |
| 2708 | * enable state loss = 1 (allow SMT mode switch) |
| 2709 | * requested level = 0 (just stop dispatching) |
| 2710 | */ |
| 2711 | lis r3, (PSSCR_EC | PSSCR_ESL)@h |
Paul Mackerras | bf53c88 | 2016-11-18 14:34:07 +1100 | [diff] [blame] | 2712 | /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */ |
| 2713 | li r4, LPCR_PECE_HVEE@higher |
| 2714 | sldi r4, r4, 32 |
| 2715 | or r5, r5, r4 |
Nicholas Piggin | 10d9161 | 2019-04-13 00:30:52 +1000 | [diff] [blame] | 2716 | FTR_SECTION_ELSE |
| 2717 | li r3, PNV_THREAD_NAP |
| 2718 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2719 | mtspr SPRN_LPCR,r5 |
| 2720 | isync |
Nicholas Piggin | 10d9161 | 2019-04-13 00:30:52 +1000 | [diff] [blame] | 2721 | |
Paul Mackerras | bf53c88 | 2016-11-18 14:34:07 +1100 | [diff] [blame] | 2722 | BEGIN_FTR_SECTION |
Nicholas Piggin | 10d9161 | 2019-04-13 00:30:52 +1000 | [diff] [blame] | 2723 | bl isa300_idle_stop_mayloss |
Paul Mackerras | bf53c88 | 2016-11-18 14:34:07 +1100 | [diff] [blame] | 2724 | FTR_SECTION_ELSE |
Nicholas Piggin | 10d9161 | 2019-04-13 00:30:52 +1000 | [diff] [blame] | 2725 | bl isa206_idle_insn_mayloss |
| 2726 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) |
| 2727 | |
| 2728 | mfspr r0, SPRN_CTRLF |
| 2729 | ori r0, r0, 1 |
| 2730 | mtspr SPRN_CTRLT, r0 |
| 2731 | |
| 2732 | mtspr SPRN_SRR1, r3 |
| 2733 | |
| 2734 | li r0, 0 |
| 2735 | stb r0, PACA_FTRACE_ENABLED(r13) |
| 2736 | |
| 2737 | li r0, KVM_HWTHREAD_IN_KVM |
| 2738 | stb r0, HSTATE_HWTHREAD_STATE(r13) |
| 2739 | |
| 2740 | lbz r0, HSTATE_NAPPING(r13) |
| 2741 | cmpwi r0, NAPPING_CEDE |
| 2742 | beq kvm_end_cede |
| 2743 | cmpwi r0, NAPPING_NOVCPU |
| 2744 | beq kvm_novcpu_wakeup |
| 2745 | cmpwi r0, NAPPING_UNSPLIT |
| 2746 | beq kvm_unsplit_wakeup |
| 2747 | twi 31,0,0 /* Nap state must not be zero */ |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2748 | |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2749 | 33: mr r4, r3 |
| 2750 | li r3, 0 |
| 2751 | li r12, 0 |
| 2752 | b 34f |
| 2753 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2754 | kvm_end_cede: |
Nicholas Piggin | 10d9161 | 2019-04-13 00:30:52 +1000 | [diff] [blame] | 2755 | /* Woken by external or decrementer interrupt */ |
| 2756 | |
Paul Mackerras | 4619ac8 | 2013-04-17 20:31:41 +0000 | [diff] [blame] | 2757 | /* get vcpu pointer */ |
| 2758 | ld r4, HSTATE_KVM_VCPU(r13) |
| 2759 | |
Paul Mackerras | b6c295d | 2015-03-28 14:21:02 +1100 | [diff] [blame] | 2760 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
| 2761 | addi r3, r4, VCPU_TB_RMINTR |
| 2762 | bl kvmhv_accumulate_time |
| 2763 | #endif |
| 2764 | |
Paul Mackerras | 93d1739 | 2016-06-22 15:52:55 +1000 | [diff] [blame] | 2765 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
Paul Mackerras | 4bb3c7a | 2018-03-21 21:32:01 +1100 | [diff] [blame] | 2766 | /* |
| 2767 | * Branch around the call if both CPU_FTR_TM and |
| 2768 | * CPU_FTR_P9_TM_HV_ASSIST are off. |
| 2769 | */ |
Paul Mackerras | 93d1739 | 2016-06-22 15:52:55 +1000 | [diff] [blame] | 2770 | BEGIN_FTR_SECTION |
Paul Mackerras | 4bb3c7a | 2018-03-21 21:32:01 +1100 | [diff] [blame] | 2771 | b 91f |
| 2772 | END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) |
Paul Mackerras | 67f8a8c | 2017-09-12 13:47:23 +1000 | [diff] [blame] | 2773 | /* |
Paul Mackerras | 7854f75 | 2018-10-08 16:30:53 +1100 | [diff] [blame] | 2774 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) |
Paul Mackerras | 67f8a8c | 2017-09-12 13:47:23 +1000 | [diff] [blame] | 2775 | */ |
Simon Guo | 6f597c6 | 2018-05-23 15:01:48 +0800 | [diff] [blame] | 2776 | mr r3, r4 |
| 2777 | ld r4, VCPU_MSR(r3) |
Paul Mackerras | 7854f75 | 2018-10-08 16:30:53 +1100 | [diff] [blame] | 2778 | li r5, 0 /* don't preserve non-vol regs */ |
Paul Mackerras | 7b0e827 | 2018-05-30 20:07:52 +1000 | [diff] [blame] | 2779 | bl kvmppc_restore_tm_hv |
Paul Mackerras | 7854f75 | 2018-10-08 16:30:53 +1100 | [diff] [blame] | 2780 | nop |
Simon Guo | 6f597c6 | 2018-05-23 15:01:48 +0800 | [diff] [blame] | 2781 | ld r4, HSTATE_KVM_VCPU(r13) |
Paul Mackerras | 4bb3c7a | 2018-03-21 21:32:01 +1100 | [diff] [blame] | 2782 | 91: |
Paul Mackerras | 93d1739 | 2016-06-22 15:52:55 +1000 | [diff] [blame] | 2783 | #endif |
| 2784 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2785 | /* load up FP state */ |
| 2786 | bl kvmppc_load_fp |
| 2787 | |
Paul Mackerras | fd6d53b | 2015-03-28 14:21:08 +1100 | [diff] [blame] | 2788 | /* Restore guest decrementer */ |
| 2789 | ld r3, VCPU_DEC_EXPIRES(r4) |
| 2790 | ld r5, HSTATE_KVM_VCORE(r13) |
Paul Mackerras | 57b8daa | 2018-04-20 22:51:11 +1000 | [diff] [blame] | 2791 | ld r6, VCORE_TB_OFFSET_APPL(r5) |
Paul Mackerras | fd6d53b | 2015-03-28 14:21:08 +1100 | [diff] [blame] | 2792 | add r3, r3, r6 /* convert host TB to guest TB value */ |
| 2793 | mftb r7 |
| 2794 | subf r3, r7, r3 |
| 2795 | mtspr SPRN_DEC, r3 |
| 2796 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2797 | /* Load NV GPRS */ |
Michael Neuling | c75df6f | 2012-06-25 13:33:10 +0000 | [diff] [blame] | 2798 | ld r14, VCPU_GPR(R14)(r4) |
| 2799 | ld r15, VCPU_GPR(R15)(r4) |
| 2800 | ld r16, VCPU_GPR(R16)(r4) |
| 2801 | ld r17, VCPU_GPR(R17)(r4) |
| 2802 | ld r18, VCPU_GPR(R18)(r4) |
| 2803 | ld r19, VCPU_GPR(R19)(r4) |
| 2804 | ld r20, VCPU_GPR(R20)(r4) |
| 2805 | ld r21, VCPU_GPR(R21)(r4) |
| 2806 | ld r22, VCPU_GPR(R22)(r4) |
| 2807 | ld r23, VCPU_GPR(R23)(r4) |
| 2808 | ld r24, VCPU_GPR(R24)(r4) |
| 2809 | ld r25, VCPU_GPR(R25)(r4) |
| 2810 | ld r26, VCPU_GPR(R26)(r4) |
| 2811 | ld r27, VCPU_GPR(R27)(r4) |
| 2812 | ld r28, VCPU_GPR(R28)(r4) |
| 2813 | ld r29, VCPU_GPR(R29)(r4) |
| 2814 | ld r30, VCPU_GPR(R30)(r4) |
| 2815 | ld r31, VCPU_GPR(R31)(r4) |
Suresh Warrier | 37f55d3 | 2016-08-19 15:35:46 +1000 | [diff] [blame] | 2816 | |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2817 | /* Check the wake reason in SRR1 to see why we got here */ |
| 2818 | bl kvmppc_check_wake_reason |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2819 | |
Suresh Warrier | 37f55d3 | 2016-08-19 15:35:46 +1000 | [diff] [blame] | 2820 | /* |
| 2821 | * Restore volatile registers since we could have called a |
| 2822 | * C routine in kvmppc_check_wake_reason |
| 2823 | * r4 = VCPU |
| 2824 | * r3 tells us whether we need to return to host or not |
| 2825 | * WARNING: r3 gets checked further down, so it |
| 2826 | * must not be modified until that check is done. |
| 2827 | */ |
| 2828 | ld r4, HSTATE_KVM_VCPU(r13) |
| 2829 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2830 | /* clear our bit in vcore->napping_threads */ |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2831 | 34: ld r5,HSTATE_KVM_VCORE(r13) |
| 2832 | lbz r7,HSTATE_PTID(r13) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2833 | li r0,1 |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2834 | sld r0,r0,r7 |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2835 | addi r6,r5,VCORE_NAPPING_THREADS |
| 2836 | 32: lwarx r7,0,r6 |
| 2837 | andc r7,r7,r0 |
| 2838 | stwcx. r7,0,r6 |
| 2839 | bne 32b |
| 2840 | li r0,0 |
| 2841 | stb r0,HSTATE_NAPPING(r13) |
| 2842 | |
Suresh Warrier | 37f55d3 | 2016-08-19 15:35:46 +1000 | [diff] [blame] | 2843 | /* See if the wake reason saved in r3 means we need to exit */ |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2844 | stw r12, VCPU_TRAP(r4) |
Paul Mackerras | 4619ac8 | 2013-04-17 20:31:41 +0000 | [diff] [blame] | 2845 | mr r9, r4 |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2846 | cmpdi r3, 0 |
| 2847 | bgt guest_exit_cont |
Paul Mackerras | df709a2 | 2018-10-08 16:30:52 +1100 | [diff] [blame] | 2848 | b maybe_reenter_guest |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2849 | |
| 2850 | /* cede when already previously prodded case */ |
Paul Mackerras | 04f995a | 2012-08-06 00:03:28 +0000 | [diff] [blame] | 2851 | kvm_cede_prodded: |
| 2852 | li r0,0 |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2853 | stb r0,VCPU_PRODDED(r3) |
| 2854 | sync /* order testing prodded vs. clearing ceded */ |
| 2855 | stb r0,VCPU_CEDED(r3) |
| 2856 | li r3,H_SUCCESS |
| 2857 | blr |
| 2858 | |
| 2859 | /* we've ceded but we want to give control to the host */ |
Paul Mackerras | 04f995a | 2012-08-06 00:03:28 +0000 | [diff] [blame] | 2860 | kvm_cede_exit: |
Paul Mackerras | 6af27c8 | 2015-03-28 14:21:10 +1100 | [diff] [blame] | 2861 | ld r9, HSTATE_KVM_VCPU(r13) |
Benjamin Herrenschmidt | 9b9b13a | 2018-01-12 13:37:16 +1100 | [diff] [blame] | 2862 | #ifdef CONFIG_KVM_XICS |
Paul Mackerras | 959c5d5 | 2019-08-13 20:03:49 +1000 | [diff] [blame] | 2863 | /* are we using XIVE with single escalation? */ |
Benjamin Herrenschmidt | 9b9b13a | 2018-01-12 13:37:16 +1100 | [diff] [blame] | 2864 | ld r10, VCPU_XIVE_ESC_VADDR(r9) |
| 2865 | cmpdi r10, 0 |
| 2866 | beq 3f |
Paul Mackerras | 959c5d5 | 2019-08-13 20:03:49 +1000 | [diff] [blame] | 2867 | li r6, XIVE_ESB_SET_PQ_00 |
| 2868 | /* |
| 2869 | * If we still have a pending escalation, abort the cede, |
| 2870 | * and we must set PQ to 10 rather than 00 so that we don't |
| 2871 | * potentially end up with two entries for the escalation |
| 2872 | * interrupt in the XIVE interrupt queue. In that case |
| 2873 | * we also don't want to set xive_esc_on to 1 here in |
| 2874 | * case we race with xive_esc_irq(). |
| 2875 | */ |
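| | /* |
| | * In rough C (see the comment above for the PQ choice): |
| | * |
| | *	if (vcpu->arch.xive_esc_on) {	// escalation still pending |
| | *		vcpu->arch.ceded = 0;	//   so abort the cede |
| | *		pq = XIVE_ESB_SET_PQ_10; |
| | *	} else { |
| | *		vcpu->arch.xive_esc_on = 1; |
| | *		smp_mb();		// order vs. xive_esc_irq() |
| | *		pq = XIVE_ESB_SET_PQ_00; |
| | *	} |
| | *	then MMIO-load from the ESB page at offset pq (via the |
| | *	cache-inhibited real address when in real mode); |
| | */ |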
| 2876 | lbz r5, VCPU_XIVE_ESC_ON(r9) |
| 2877 | cmpwi r5, 0 |
| 2878 | beq 4f |
| 2879 | li r0, 0 |
| 2880 | stb r0, VCPU_CEDED(r9) |
| 2881 | li r6, XIVE_ESB_SET_PQ_10 |
| 2882 | b 5f |
| 2883 | 4: li r0, 1 |
| 2884 | stb r0, VCPU_XIVE_ESC_ON(r9) |
| 2885 | /* make sure store to xive_esc_on is seen before xive_esc_irq runs */ |
| 2886 | sync |
| 2887 | 5: /* Enable XIVE escalation */ |
| 2888 | mfmsr r0 |
| 2889 | andi. r0, r0, MSR_DR /* in real mode? */ |
| 2890 | beq 1f |
| 2891 | ldx r0, r10, r6 |
Benjamin Herrenschmidt | 9b9b13a | 2018-01-12 13:37:16 +1100 | [diff] [blame] | 2892 | b 2f |
| 2893 | 1: ld r10, VCPU_XIVE_ESC_RADDR(r9) |
Paul Mackerras | 959c5d5 | 2019-08-13 20:03:49 +1000 | [diff] [blame] | 2894 | ldcix r0, r10, r6 |
Benjamin Herrenschmidt | 9b9b13a | 2018-01-12 13:37:16 +1100 | [diff] [blame] | 2895 | 2: sync |
Benjamin Herrenschmidt | 9b9b13a | 2018-01-12 13:37:16 +1100 | [diff] [blame] | 2896 | #endif /* CONFIG_KVM_XICS */ |
| 2897 | 3: b guest_exit_cont |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 2898 | |
Paul Mackerras | 884dfb7 | 2019-02-21 13:38:49 +1100 | [diff] [blame] | 2899 | /* Try to do machine check recovery in real mode */ |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2900 | machine_check_realmode: |
| 2901 | mr r3, r9 /* get vcpu pointer */ |
Anton Blanchard | b1576fe | 2014-02-04 16:04:35 +1100 | [diff] [blame] | 2902 | bl kvmppc_realmode_machine_check |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2903 | nop |
Paul Mackerras | 884dfb7 | 2019-02-21 13:38:49 +1100 | [diff] [blame] | 2904 | /* all machine checks go to virtual mode for further handling */ |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2905 | ld r9, HSTATE_KVM_VCPU(r13) |
| 2906 | li r12, BOOK3S_INTERRUPT_MACHINE_CHECK |
Paul Mackerras | 884dfb7 | 2019-02-21 13:38:49 +1100 | [diff] [blame] | 2907 | b guest_exit_cont |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 2908 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2909 | /* |
Paul Mackerras | df709a2 | 2018-10-08 16:30:52 +1100 | [diff] [blame] | 2910 | * Call C code to handle a HMI in real mode. |
| 2911 | * Only the primary thread does the call, secondary threads are handled |
| 2912 | * by calling hmi_exception_realmode() after kvmppc_hv_entry returns. |
| 2913 | * r9 points to the vcpu on entry |
| 2914 | */ |
| 2915 | hmi_realmode: |
| 2916 | lbz r0, HSTATE_PTID(r13) |
| 2917 | cmpwi r0, 0 |
| 2918 | bne guest_exit_cont |
| 2919 | bl kvmppc_realmode_hmi_handler |
| 2920 | ld r9, HSTATE_KVM_VCPU(r13) |
| 2921 | li r12, BOOK3S_INTERRUPT_HMI |
| 2922 | b guest_exit_cont |
| 2923 | |
| 2924 | /* |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2925 | * Check the reason we woke from nap, and take appropriate action. |
Paul Mackerras | 1f09c3e | 2015-03-28 14:21:04 +1100 | [diff] [blame] | 2926 | * Returns (in r3): |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2927 | * 0 if nothing needs to be done |
| 2928 | * 1 if something happened that needs to be handled by the host |
Paul Mackerras | 66feed6 | 2015-03-28 14:21:12 +1100 | [diff] [blame] | 2929 | * -1 if there was a guest wakeup (IPI or msgsnd) |
Suresh Warrier | e3c13e5 | 2016-08-19 15:35:51 +1000 | [diff] [blame] | 2930 | * -2 if we handled a PCI passthrough interrupt (returned by |
| 2931 | * kvmppc_read_intr only) |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2932 | * |
| 2933 | * Also sets r12 to the interrupt vector for any interrupt that needs |
| 2934 | * to be handled now by the host (0x500 for external interrupt), or zero. |
Suresh Warrier | 37f55d3 | 2016-08-19 15:35:46 +1000 | [diff] [blame] | 2935 | * Modifies all volatile registers (since it may call a C function). |
| 2936 | * This routine calls kvmppc_read_intr, a C function, if an external |
| 2937 | * interrupt is pending. |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2938 | */ |
| 2939 | kvmppc_check_wake_reason: |
| 2940 | mfspr r6, SPRN_SRR1 |
Paul Mackerras | aa31e84 | 2014-01-08 21:25:26 +1100 | [diff] [blame] | 2941 | BEGIN_FTR_SECTION |
| 2942 | rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ |
| 2943 | FTR_SECTION_ELSE |
| 2944 | rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */ |
| 2945 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) |
| 2946 | cmpwi r6, 8 /* was it an external interrupt? */ |
Suresh Warrier | 37f55d3 | 2016-08-19 15:35:46 +1000 | [diff] [blame] | 2947 | beq 7f /* if so, see what it was */ |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2948 | li r3, 0 |
| 2949 | li r12, 0 |
| 2950 | cmpwi r6, 6 /* was it the decrementer? */ |
| 2951 | beq 0f |
Paul Mackerras | aa31e84 | 2014-01-08 21:25:26 +1100 | [diff] [blame] | 2952 | BEGIN_FTR_SECTION |
| 2953 | cmpwi r6, 5 /* privileged doorbell? */ |
| 2954 | beq 0f |
Paul Mackerras | 5d00f66 | 2014-01-08 21:25:28 +1100 | [diff] [blame] | 2955 | cmpwi r6, 3 /* hypervisor doorbell? */ |
| 2956 | beq 3f |
Paul Mackerras | aa31e84 | 2014-01-08 21:25:26 +1100 | [diff] [blame] | 2957 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
Mahesh Salgaonkar | fd7bacb | 2016-05-15 09:44:26 +0530 | [diff] [blame] | 2958 | cmpwi r6, 0xa /* Hypervisor maintenance? */ |
| 2959 | beq 4f |
Paul Mackerras | e3bbbbf | 2014-01-08 21:25:25 +1100 | [diff] [blame] | 2960 | li r3, 1 /* anything else, return 1 */ |
| 2961 | 0: blr |
| 2962 | |
Paul Mackerras | 5d00f66 | 2014-01-08 21:25:28 +1100 | [diff] [blame] | 2963 | /* hypervisor doorbell */ |
| 2964 | 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL |
Gautham R. Shenoy | 70aa396 | 2015-10-15 11:29:58 +0530 | [diff] [blame] | 2965 | |
| 2966 | /* |
| 2967 | * Clear the doorbell as we will invoke the handler |
| 2968 | * explicitly in the guest exit path. |
| 2969 | */ |
| 2970 | lis r6, (PPC_DBELL_SERVER << (63-36))@h |
| 2971 | PPC_MSGCLR(6) |
Paul Mackerras | 66feed6 | 2015-03-28 14:21:12 +1100 | [diff] [blame] | 2972 | /* see if it's a host IPI */ |
Paul Mackerras | 5d00f66 | 2014-01-08 21:25:28 +1100 | [diff] [blame] | 2973 | li r3, 1 |
Nicholas Piggin | 2cde371 | 2017-10-10 20:18:28 +1000 | [diff] [blame] | 2974 | BEGIN_FTR_SECTION |
| 2975 | PPC_MSGSYNC |
| 2976 | lwsync |
| 2977 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
Paul Mackerras | 66feed6 | 2015-03-28 14:21:12 +1100 | [diff] [blame] | 2978 | lbz r0, HSTATE_HOST_IPI(r13) |
| 2979 | cmpwi r0, 0 |
| 2980 | bnelr |
Gautham R. Shenoy | 70aa396 | 2015-10-15 11:29:58 +0530 | [diff] [blame] | 2981 | /* if not, return -1 */ |
Paul Mackerras | 66feed6 | 2015-03-28 14:21:12 +1100 | [diff] [blame] | 2982 | li r3, -1 |
Paul Mackerras | 5d00f66 | 2014-01-08 21:25:28 +1100 | [diff] [blame] | 2983 | blr |
| 2984 | |
Mahesh Salgaonkar | fd7bacb | 2016-05-15 09:44:26 +0530 | [diff] [blame] | 2985 | /* Woken up due to Hypervisor maintenance interrupt */ |
| 2986 | 4: li r12, BOOK3S_INTERRUPT_HMI |
| 2987 | li r3, 1 |
| 2988 | blr |
| 2989 | |
Suresh Warrier | 37f55d3 | 2016-08-19 15:35:46 +1000 | [diff] [blame] | 2990 | /* external interrupt - create a stack frame so we can call C */ |
| 2991 | 7: mflr r0 |
| 2992 | std r0, PPC_LR_STKOFF(r1) |
| 2993 | stdu r1, -PPC_MIN_STKFRM(r1) |
| 2994 | bl kvmppc_read_intr |
| 2995 | nop |
| 2996 | li r12, BOOK3S_INTERRUPT_EXTERNAL |
Suresh Warrier | f7af520 | 2016-08-19 15:35:52 +1000 | [diff] [blame] | 2997 | cmpdi r3, 1 |
| 2998 | ble 1f |
| 2999 | |
| 3000 | /* |
| 3001 | * Return code of 2 means PCI passthrough interrupt, but |
| 3002 | * we need to return back to host to complete handling the |
| 3003 | * interrupt. Trap reason is expected in r12 by guest |
| 3004 | * exit code. |
| 3005 | */ |
| 3006 | li r12, BOOK3S_INTERRUPT_HV_RM_HARD |
| 3007 | 1: |
Suresh Warrier | 37f55d3 | 2016-08-19 15:35:46 +1000 | [diff] [blame] | 3008 | ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1) |
| 3009 | addi r1, r1, PPC_MIN_STKFRM |
| 3010 | mtlr r0 |
| 3011 | blr |

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers.
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr
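
/*
 * Both helpers above use the same preamble: turn the FP (and, where
 * present, VMX/VSX) facilities on in the MSR so store_fp_state,
 * load_fp_state and the VR variants can access the registers. As a
 * hedged C sketch (cpu_has_feature() stands in for the patched
 * feature sections):
 *
 *	unsigned long msr = mfmsr();
 *	msr |= MSR_FP;
 *	if (cpu_has_feature(CPU_FTR_ALTIVEC))
 *		msr |= MSR_VEC;
 *	if (cpu_has_feature(CPU_FTR_VSX))
 *		msr |= MSR_VSX;
 *	mtmsrd(msr);		// now FP/VMX/VSX state is accessible
 */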

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct and r4 containing
 * the guest MSR value.
 * r5 is non-zero iff non-volatile register state needs to be maintained.
 * If r5 == 0, this can modify all checkpointed registers, but
 * restores r1 and r2 before exit.
 */
_GLOBAL_TOC(kvmppc_save_tm_hv)
EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv)
	/* See if we need to handle fake suspend mode */
BEGIN_FTR_SECTION
	b	__kvmppc_save_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)

	lbz	r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
	cmpwi	r0, 0
	beq	__kvmppc_save_tm

	/* The following code handles the fake_suspend = 1 case */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
	beq	4f
BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_catch
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

	/*
	 * We have to treclaim here because that is the only way to get
	 * from suspended back to non-transactional state (S->N).
	 */
	li	r3, TM_CAUSE_KVM_RESCHED
	TRECLAIM(R3)

	/*
	 * We were in fake suspend, so we are not going to save the
	 * register state as the guest checkpointed state (since
	 * we already have it); therefore we can now use any volatile GPR.
	 * In fact treclaim in fake suspend state doesn't modify
	 * any registers.
	 */

BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_release
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

4:
	mfspr	r3, SPRN_PSSCR
	/* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
	li	r0, PSSCR_FAKE_SUSPEND
	andc	r3, r3, r0
	mtspr	SPRN_PSSCR, r3

	/* Don't save TEXASR, use value from last exit in real suspend state */
	ld	r9, HSTATE_KVM_VCPU(r13)
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	addi	r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
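
/*
 * A hedged C outline of the save path above (illustrative only, the
 * asm is authoritative):
 *
 *	if (!cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST) ||
 *	    !local_paca->kvm_hstate.fake_suspend)
 *		return __kvmppc_save_tm(vcpu, guest_msr);
 *	// Fake suspend: treclaim is the only way back to N state and
 *	// modifies no registers here, so only clear fake-suspend and
 *	// save TFHAR/TFIAR (TEXASR keeps its last real-suspend value).
 */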

/*
 * Restore transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct
 * and r4 containing the guest MSR value.
 * r5 is non-zero iff non-volatile register state needs to be maintained.
 * This potentially modifies all checkpointed registers.
 * It restores r1 and r2 from the PACA.
 */
_GLOBAL_TOC(kvmppc_restore_tm_hv)
EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv)
	/*
	 * If we are doing TM emulation for the guest on a POWER9 DD2,
	 * then we don't actually do a trechkpt -- we either set up
	 * fake-suspend mode, or emulate a TM rollback.
	 */
BEGIN_FTR_SECTION
	b	__kvmppc_restore_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	li	r0, 0
	stb	r0, HSTATE_FAKE_SUSPEND(r13)

	/* Turn on TM so we can restore TM SPRs */
	mfmsr	r5
	li	r0, 1
	rldimi	r5, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */

	/* Make sure the failure summary is set */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	cmpwi	r5, 1		/* check for suspended state */
	bgt	10f
	stb	r5, HSTATE_FAKE_SUSPEND(r13)
	b	9f		/* and return */
10:	stdu	r1, -PPC_MIN_STKFRM(r1)
	/* guest is in transactional state, so simulate rollback */
	bl	kvmhv_emulate_tm_rollback
	nop
	addi	r1, r1, PPC_MIN_STKFRM
9:	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
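
/*
 * Decision flow of the TM-assist restore path above, as a hedged C
 * sketch (MSR_TM_ACTIVE() etc. used loosely, illustrative only):
 *
 *	if (!cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
 *		return __kvmppc_restore_tm(vcpu, guest_msr);
 *	restore TFHAR, TFIAR, TEXASR;		// always context-switched
 *	if (!MSR_TM_ACTIVE(guest_msr))
 *		return;
 *	TEXASR |= TEXASR_FS;			// failure summary must be set
 *	if (guest MSR[TS] == suspended)
 *		local_paca->kvm_hstate.fake_suspend = 1;
 *	else					// transactional: roll back
 *		kvmhv_emulate_tm_rollback(vcpu);
 */
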
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * r12 is (CR << 32) | vector
 * r13 points to our PACA
 * r12 is saved in HSTATE_SCRATCH0(r13)
 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
 * r9 is saved in HSTATE_SCRATCH2(r13)
 * r13 is saved in HSPRG1
 * cfar is saved in HSTATE_CFAR(r13)
 * ppr is saved in HSTATE_PPR(r13)
 */
kvmppc_bad_host_intr:
	/*
	 * Switch to the emergency stack, but start half-way down in
	 * case we were already on it.
	 */
	mr	r9, r1
	std	r1, PACAR1(r13)
	ld	r1, PACAEMERGSP(r13)
	subi	r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
	std	r9, 0(r1)
	std	r0, GPR0(r1)
	std	r9, GPR1(r1)
	std	r2, GPR2(r1)
	SAVE_4GPRS(3, r1)
	SAVE_2GPRS(7, r1)
	srdi	r0, r12, 32
	clrldi	r12, r12, 32
	std	r0, _CCR(r1)
	std	r12, _TRAP(r1)
	andi.	r0, r12, 2
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	mfspr	r5, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	b	2f
1:	mfspr	r3, SPRN_SRR0
	mfspr	r4, SPRN_SRR1
	mfspr	r5, SPRN_DAR
	mfspr	r6, SPRN_DSISR
2:	std	r3, _NIP(r1)
	std	r4, _MSR(r1)
	std	r5, _DAR(r1)
	std	r6, _DSISR(r1)
	ld	r9, HSTATE_SCRATCH2(r13)
	ld	r12, HSTATE_SCRATCH0(r13)
	GET_SCRATCH0(r0)
	SAVE_4GPRS(9, r1)
	std	r0, GPR13(r1)
	SAVE_NVGPRS(r1)
	ld	r5, HSTATE_CFAR(r13)
	std	r5, ORIG_GPR3(r1)
	mflr	r3
#ifdef CONFIG_RELOCATABLE
	ld	r4, HSTATE_SCRATCH1(r13)
#else
	mfctr	r4
#endif
	mfxer	r5
	lbz	r6, PACAIRQSOFTMASK(r13)
	std	r3, _LINK(r1)
	std	r4, _CTR(r1)
	std	r5, _XER(r1)
	std	r6, SOFTE(r1)
	ld	r2, PACATOC(r13)
	LOAD_REG_IMMEDIATE(3, 0x7265677368657265)	/* "regshere" */
	std	r3, STACK_FRAME_OVERHEAD-16(r1)

	/*
	 * On POWER9 do a minimal restore of the MMU and call C code,
	 * which will print a message and panic.
	 * XXX On POWER7 and POWER8, we just spin here since we don't
	 * know what the other threads are doing (and we don't want to
	 * coordinate with them) - but at least we now have register state
	 * in memory that we might be able to look at from another CPU.
	 */
BEGIN_FTR_SECTION
	b	.
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_KVM(r9)

	li	r0, 0
	mtspr	SPRN_AMR, r0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0

BEGIN_MMU_FTR_SECTION
	b	4f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)

	slbmte	r0, r0
	slbia
	ptesync
	ld	r8, PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7, r5, SLB_ESID_V@h
	beq	3f
	slbmte	r6, r5
3:	addi	r8, r8, 16
	.endr

4:	lwz	r7, KVM_HOST_LPID(r10)
	mtspr	SPRN_LPID, r7
	mtspr	SPRN_PID, r0
	ld	r8, KVM_HOST_LPCR(r10)
	mtspr	SPRN_LPCR, r8
	isync
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	/*
	 * Turn on the MMU and jump to C code
	 */
	bcl	20, 31, .+4	/* fetch the address of label 5 into LR */
5:	mflr	r3
	addi	r3, r3, 9f - 5b
	li	r4, -1
	rldimi	r3, r4, 62, 0	/* ensure 0xc000000000000000 bits are set */
	ld	r4, PACAKMSR(r13)
	mtspr	SPRN_SRR0, r3
	mtspr	SPRN_SRR1, r4
	RFI_TO_KERNEL
9:	addi	r3, r1, STACK_FRAME_OVERHEAD
	bl	kvmppc_bad_interrupt
	b	9b
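
/*
 * The tail above uses the usual position-independent idiom: bcl
 * 20,31,.+4 puts the address of label 5 in LR, from which the
 * run-time address of label 9 is computed and forced into the kernel
 * linear mapping before RFI_TO_KERNEL. A hedged C-level sketch:
 *
 *	unsigned long dest = runtime_address_of_label_9;
 *	dest |= 0xc000000000000000UL;	// ensure kernel address bits
 *	mtspr(SPRN_SRR0, dest);
 *	mtspr(SPRN_SRR1, paca->kernel_msr);	// PACAKMSR
 *	// rfid: resume at label 9 with the MMU on
 */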

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2 /* Check if we are in transactional state... */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG	/* otherwise carry the old TS bits over */
	blr
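
/*
 * Equivalent logic as a hedged C sketch (illustrative; the TS field
 * is bits 34:33 of the MSR, 2 = transactional, 1 = suspended):
 *
 *	unsigned long ts = (guest_msr >> MSR_TS_S_LG) & 3;
 *	unsigned long new_msr = vcpu->arch.intr_msr;
 *	if (ts == 2)
 *		ts = 1;		// interrupt delivery suspends a transaction
 *	new_msr |= ts << MSR_TS_S_LG;	// otherwise TS carries over
 *	return new_msr;
 */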

/*
 * Load up guest PMU state.  R3 points to the vcpu struct.
 */
_GLOBAL(kvmhv_load_guest_pmu)
EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu)
	mr	r4, r3
	mflr	r0
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
	mtlr	r0
	blr

/*
 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
 */
_GLOBAL(kvmhv_load_host_pmu)
EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu)
	mflr	r0
	lbz	r4, PACA_PMCINUSE(r13)	/* is the host using the PMU? */
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
	mtlr	r0
23:	blr

/*
 * Save guest PMU state into the vcpu struct.
 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
 */
_GLOBAL(kvmhv_save_guest_pmu)
EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
	mr	r9, r3
	mr	r8, r4
BEGIN_FTR_SECTION
	/*
	 * POWER8 appears to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative does not cause a
	 * performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	cmpwi	r8, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:	blr
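
/*
 * The MMCR2 mask above is built arithmetically: li -1 then
 * clrrdi ,10 keeps the upper 54 bits, i.e. all nine freeze-condition
 * bits for each of the six counters. A hedged C equivalent:
 *
 *	unsigned long mmcr2_fc_bits = ~0UL << 10;	// 0xfffffffffffffc00
 */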

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr
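
/*
 * kvmppc_fix_pmao arranges a pending interrupt by construction: with
 * MMCR0[PMXE|FCECE|PMCjCE|C56RUN] set and PMC6 preloaded to
 * 0x7fffffff, the next event makes PMC6 negative and raises the
 * alert that a direct write of MMCR0[PMAO] fails to deliver on
 * POWER8E. A hedged C equivalent of the preload:
 *
 *	mtspr(SPRN_PMC6, 0x7fffffff);	// one event away from overflow
 */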

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	mftb	r5
	subf	r5, r6, r5	/* subtract current timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7	/* subtract current timebase offset */
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0		/* is this the first sample? */
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f		/* first sample: store it as the minimum */
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
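
/*
 * kvmhv_accumulate_time publishes each accumulator under a seqcount
 * so a C-side reader can detect torn updates. A hedged sketch of the
 * writer protocol implemented above (names mirror the TAS_* offsets):
 *
 *	acc->seqcount++;		// now odd: update in progress
 *	smp_wmb();			// the lwsync above
 *	acc->total += delta;
 *	if (first_sample || delta < acc->min)
 *		acc->min = delta;
 *	if (delta > acc->max)
 *		acc->max = delta;
 *	smp_wmb();
 *	acc->seqcount++;		// even again: update complete
 */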
#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */