/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * enabled; this function hard-disables them itself.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

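/*
 * Handle a KVM paravirtual hypercall from the guest: the hypercall number
 * arrives in r11 and up to four arguments in r3-r6. The status code is
 * handed back to the caller, and a second return value is written to the
 * guest's r4 below.
 */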
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

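/*
 * Guest store/load helpers: let the platform-specific kvm_ops handle the
 * access directly when they provide a hook, otherwise translate the guest
 * effective address, honour the magic page override, and fall back to
 * EMULATE_DO_MMIO when the address is not backed by guest memory.
 */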
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

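/*
 * VM creation: pick the HV or PR implementation based on the VM type
 * argument. A type of 0 means "no preference", in which case HV is chosen
 * whenever the HV module is available.
 */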
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

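/*
 * KVM_CHECK_EXTENSION handler: report which capabilities are available.
 * Several answers differ depending on whether the VM (or the loaded
 * module) is HV or PR.
 */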
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
		       cpu_has_feature(CPU_FTR_HVMODE));
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

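/* hrtimer callback backing the emulated guest decrementer */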
static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

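/*
 * MMIO emulation helpers for VSX: map the MMIO element offset onto the
 * correct doubleword/word of the target FPR or VR, accounting for host
 * endianness.
 */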
#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

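/*
 * MMIO emulation helpers for Altivec (VMX): compute the element offset
 * within the 128-bit VR for each access size, reversing the element order
 * when kvmppc_need_byteswap() reports a byte-swapped guest.
 */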
#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}


static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */

Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1082 | #ifdef CONFIG_PPC_FPU |
| 1083 | static inline u64 sp_to_dp(u32 fprs) |
| 1084 | { |
| 1085 | u64 fprd; |
| 1086 | |
| 1087 | preempt_disable(); |
| 1088 | enable_kernel_fp(); |
| 1089 | asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs) |
| 1090 | : "fr0"); |
| 1091 | preempt_enable(); |
| 1092 | return fprd; |
| 1093 | } |
| 1094 | |
| 1095 | static inline u32 dp_to_sp(u64 fprd) |
| 1096 | { |
| 1097 | u32 fprs; |
| 1098 | |
| 1099 | preempt_disable(); |
| 1100 | enable_kernel_fp(); |
| 1101 | asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd) |
| 1102 | : "fr0"); |
| 1103 | preempt_enable(); |
| 1104 | return fprs; |
| 1105 | } |
| 1106 | |
| 1107 | #else |
| 1108 | #define sp_to_dp(x) (x) |
| 1109 | #define dp_to_sp(x) (x) |
| 1110 | #endif /* CONFIG_PPC_FPU */ |
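sp_to_dp() and dp_to_sp() bounce the value through fr0 so the FPU itself performs the single/double conversion with the architected rounding and NaN handling, which is why kernel FP has to be enabled and preemption disabled around the asm. A plain C user-space sketch of the same idea follows; the helper name is hypothetical and it relies on the compiler's float/double conversion rather than lfs/stfd:

```c
#include <stdint.h>
#include <string.h>

/* Hypothetical user-space sketch of sp_to_dp(): reinterpret the 32-bit
 * single-precision image as a float, widen it with a C conversion and
 * return the 64-bit double-precision image. The kernel helper uses
 * lfs/stfd instead so the FPU performs the conversion. */
static uint64_t sp_to_dp_sketch(uint32_t sp_image)
{
	float f;
	double d;
	uint64_t dp_image;

	memcpy(&f, &sp_image, sizeof(f));	/* raw bits -> float */
	d = (double)f;				/* single -> double */
	memcpy(&dp_image, &d, sizeof(d));	/* double -> raw bits */
	return dp_image;
}
```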
| 1111 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1112 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, |
| 1113 | struct kvm_run *run) |
| 1114 | { |
Denis Kirjanov | 69b6183 | 2010-06-11 11:23:26 +0000 | [diff] [blame] | 1115 | u64 uninitialized_var(gpr); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1116 | |
Alexander Graf | 8e5b26b | 2010-01-08 02:58:01 +0100 | [diff] [blame] | 1117 | if (run->mmio.len > sizeof(gpr)) { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1118 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); |
| 1119 | return; |
| 1120 | } |
| 1121 | |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1122 | if (!vcpu->arch.mmio_host_swabbed) { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1123 | switch (run->mmio.len) { |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1124 | case 8: gpr = *(u64 *)run->mmio.data; break; |
Alexander Graf | 8e5b26b | 2010-01-08 02:58:01 +0100 | [diff] [blame] | 1125 | case 4: gpr = *(u32 *)run->mmio.data; break; |
| 1126 | case 2: gpr = *(u16 *)run->mmio.data; break; |
| 1127 | case 1: gpr = *(u8 *)run->mmio.data; break; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1128 | } |
| 1129 | } else { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1130 | switch (run->mmio.len) { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1131 | case 8: gpr = swab64(*(u64 *)run->mmio.data); break; |
| 1132 | case 4: gpr = swab32(*(u32 *)run->mmio.data); break; |
| 1133 | case 2: gpr = swab16(*(u16 *)run->mmio.data); break; |
Alexander Graf | 8e5b26b | 2010-01-08 02:58:01 +0100 | [diff] [blame] | 1134 | case 1: gpr = *(u8 *)run->mmio.data; break; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1135 | } |
| 1136 | } |
Alexander Graf | 8e5b26b | 2010-01-08 02:58:01 +0100 | [diff] [blame] | 1137 | |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1138 | /* conversion between single and double precision */ |
| 1139 | if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) |
| 1140 | gpr = sp_to_dp(gpr); |
| 1141 | |
Alexander Graf | 3587d53 | 2010-02-19 11:00:30 +0100 | [diff] [blame] | 1142 | if (vcpu->arch.mmio_sign_extend) { |
| 1143 | switch (run->mmio.len) { |
| 1144 | #ifdef CONFIG_PPC64 |
| 1145 | case 4: |
| 1146 | gpr = (s64)(s32)gpr; |
| 1147 | break; |
| 1148 | #endif |
| 1149 | case 2: |
| 1150 | gpr = (s64)(s16)gpr; |
| 1151 | break; |
| 1152 | case 1: |
| 1153 | gpr = (s64)(s8)gpr; |
| 1154 | break; |
| 1155 | } |
| 1156 | } |
| 1157 | |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1158 | switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { |
| 1159 | case KVM_MMIO_REG_GPR: |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1160 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); |
| 1161 | break; |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1162 | case KVM_MMIO_REG_FPR: |
Simon Guo | 2e6baa4 | 2018-05-21 13:24:22 +0800 | [diff] [blame] | 1163 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
| 1164 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); |
| 1165 | |
Paul Mackerras | efff191 | 2013-10-15 20:43:02 +1100 | [diff] [blame] | 1166 | VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1167 | break; |
Alexander Graf | 287d561 | 2010-04-01 15:33:21 +0200 | [diff] [blame] | 1168 | #ifdef CONFIG_PPC_BOOK3S |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1169 | case KVM_MMIO_REG_QPR: |
| 1170 | vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1171 | break; |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1172 | case KVM_MMIO_REG_FQPR: |
Paul Mackerras | efff191 | 2013-10-15 20:43:02 +1100 | [diff] [blame] | 1173 | VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1174 | vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1175 | break; |
Alexander Graf | 287d561 | 2010-04-01 15:33:21 +0200 | [diff] [blame] | 1176 | #endif |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1177 | #ifdef CONFIG_VSX |
| 1178 | case KVM_MMIO_REG_VSX: |
Simon Guo | 2e6baa4 | 2018-05-21 13:24:22 +0800 | [diff] [blame] | 1179 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
| 1180 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); |
| 1181 | |
Simon Guo | da2a32b | 2018-05-21 13:24:25 +0800 | [diff] [blame] | 1182 | if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1183 | kvmppc_set_vsr_dword(vcpu, gpr); |
Simon Guo | da2a32b | 2018-05-21 13:24:25 +0800 | [diff] [blame] | 1184 | else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1185 | kvmppc_set_vsr_word(vcpu, gpr); |
Simon Guo | da2a32b | 2018-05-21 13:24:25 +0800 | [diff] [blame] | 1186 | else if (vcpu->arch.mmio_copy_type == |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1187 | KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) |
| 1188 | kvmppc_set_vsr_dword_dump(vcpu, gpr); |
Simon Guo | da2a32b | 2018-05-21 13:24:25 +0800 | [diff] [blame] | 1189 | else if (vcpu->arch.mmio_copy_type == |
Simon Guo | 94dd7fa | 2018-05-21 13:24:20 +0800 | [diff] [blame] | 1190 | KVMPPC_VSX_COPY_WORD_LOAD_DUMP) |
| 1191 | kvmppc_set_vsr_word_dump(vcpu, gpr); |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1192 | break; |
| 1193 | #endif |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1194 | #ifdef CONFIG_ALTIVEC |
| 1195 | case KVM_MMIO_REG_VMX: |
Simon Guo | 2e6baa4 | 2018-05-21 13:24:22 +0800 | [diff] [blame] | 1196 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
| 1197 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); |
| 1198 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1199 | if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) |
| 1200 | kvmppc_set_vmx_dword(vcpu, gpr); |
| 1201 | else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) |
| 1202 | kvmppc_set_vmx_word(vcpu, gpr); |
| 1203 | else if (vcpu->arch.mmio_copy_type == |
| 1204 | KVMPPC_VMX_COPY_HWORD) |
| 1205 | kvmppc_set_vmx_hword(vcpu, gpr); |
| 1206 | else if (vcpu->arch.mmio_copy_type == |
| 1207 | KVMPPC_VMX_COPY_BYTE) |
| 1208 | kvmppc_set_vmx_byte(vcpu, gpr); |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1209 | break; |
| 1210 | #endif |
Suraj Jitindar Singh | 873db2c | 2018-12-14 16:29:08 +1100 | [diff] [blame] | 1211 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
| 1212 | case KVM_MMIO_REG_NESTED_GPR: |
| 1213 | if (kvmppc_need_byteswap(vcpu)) |
| 1214 | gpr = swab64(gpr); |
| 1215 | kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, |
| 1216 | sizeof(gpr)); |
| 1217 | break; |
| 1218 | #endif |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1219 | default: |
| 1220 | BUG(); |
| 1221 | } |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1222 | } |
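The sign-extension step in kvmppc_complete_mmio_load() is what turns a narrow MMIO result into a correctly signed 64-bit register image for algebraic loads such as lha/lwa. A tiny standalone illustration of the same cast chain used in the len == 2 case:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gpr = 0xff80;			/* 2-byte value read from MMIO */

	gpr = (int64_t)(int16_t)gpr;		/* same as the (s64)(s16) cast above */
	printf("%#llx\n", (unsigned long long)gpr);	/* 0xffffffffffffff80 */
	return 0;
}
```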
| 1223 | |
Paul Mackerras | eb8b056 | 2016-05-05 16:17:10 +1000 | [diff] [blame] | 1224 | static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1225 | unsigned int rt, unsigned int bytes, |
| 1226 | int is_default_endian, int sign_extend) |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1227 | { |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1228 | int idx, ret; |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1229 | bool host_swabbed; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1230 | |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1231 | /* Pity C doesn't have a logical XOR operator */ |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1232 | if (kvmppc_need_byteswap(vcpu)) { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1233 | host_swabbed = is_default_endian; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1234 | } else { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1235 | host_swabbed = !is_default_endian; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1236 | } |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1237 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1238 | if (bytes > sizeof(run->mmio.data)) { |
| 1239 | printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, |
| 1240 | bytes); |
| 1241 | } |
| 1242 | |
| 1243 | run->mmio.phys_addr = vcpu->arch.paddr_accessed; |
| 1244 | run->mmio.len = bytes; |
| 1245 | run->mmio.is_write = 0; |
| 1246 | |
| 1247 | vcpu->arch.io_gpr = rt; |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1248 | vcpu->arch.mmio_host_swabbed = host_swabbed; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1249 | vcpu->mmio_needed = 1; |
| 1250 | vcpu->mmio_is_write = 0; |
Paul Mackerras | eb8b056 | 2016-05-05 16:17:10 +1000 | [diff] [blame] | 1251 | vcpu->arch.mmio_sign_extend = sign_extend; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1252 | |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1253 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 1254 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 1255 | ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1256 | bytes, &run->mmio.data); |
| 1257 | |
| 1258 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 1259 | |
| 1260 | if (!ret) { |
Alexander Graf | 0e673fb | 2012-10-09 00:06:20 +0200 | [diff] [blame] | 1261 | kvmppc_complete_mmio_load(vcpu, run); |
| 1262 | vcpu->mmio_needed = 0; |
| 1263 | return EMULATE_DONE; |
| 1264 | } |
| 1265 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1266 | return EMULATE_DO_MMIO; |
| 1267 | } |
Paul Mackerras | eb8b056 | 2016-05-05 16:17:10 +1000 | [diff] [blame] | 1268 | |
| 1269 | int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1270 | unsigned int rt, unsigned int bytes, |
| 1271 | int is_default_endian) |
| 1272 | { |
| 1273 | return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0); |
| 1274 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 1275 | EXPORT_SYMBOL_GPL(kvmppc_handle_load); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1276 | |
Alexander Graf | 3587d53 | 2010-02-19 11:00:30 +0100 | [diff] [blame] | 1277 | /* Same as above, but sign extends */ |
| 1278 | int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1279 | unsigned int rt, unsigned int bytes, |
| 1280 | int is_default_endian) |
Alexander Graf | 3587d53 | 2010-02-19 11:00:30 +0100 | [diff] [blame] | 1281 | { |
Paul Mackerras | eb8b056 | 2016-05-05 16:17:10 +1000 | [diff] [blame] | 1282 | return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1); |
Alexander Graf | 3587d53 | 2010-02-19 11:00:30 +0100 | [diff] [blame] | 1283 | } |
| 1284 | |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1285 | #ifdef CONFIG_VSX |
| 1286 | int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1287 | unsigned int rt, unsigned int bytes, |
| 1288 | int is_default_endian, int mmio_sign_extend) |
| 1289 | { |
| 1290 | enum emulation_result emulated = EMULATE_DONE; |
| 1291 | |
Paul Mackerras | 9aa6825 | 2017-11-20 19:56:27 +1100 | [diff] [blame] | 1292 | /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */ |
| 1293 | if (vcpu->arch.mmio_vsx_copy_nums > 4) |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1294 | return EMULATE_FAIL; |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1295 | |
| 1296 | while (vcpu->arch.mmio_vsx_copy_nums) { |
| 1297 | emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, |
| 1298 | is_default_endian, mmio_sign_extend); |
| 1299 | |
| 1300 | if (emulated != EMULATE_DONE) |
| 1301 | break; |
| 1302 | |
| 1303 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1304 | |
| 1305 | vcpu->arch.mmio_vsx_copy_nums--; |
| 1306 | vcpu->arch.mmio_vsx_offset++; |
| 1307 | } |
| 1308 | return emulated; |
| 1309 | } |
| 1310 | #endif /* CONFIG_VSX */ |
| 1311 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1312 | int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1313 | u64 val, unsigned int bytes, int is_default_endian) |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1314 | { |
| 1315 | void *data = run->mmio.data; |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1316 | int idx, ret; |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1317 | bool host_swabbed; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1318 | |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1319 | /* Pity C doesn't have a logical XOR operator */ |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1320 | if (kvmppc_need_byteswap(vcpu)) { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1321 | host_swabbed = is_default_endian; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1322 | } else { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1323 | host_swabbed = !is_default_endian; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1324 | } |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1325 | |
| 1326 | if (bytes > sizeof(run->mmio.data)) { |
| 1327 | printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, |
| 1328 | bytes); |
| 1329 | } |
| 1330 | |
| 1331 | run->mmio.phys_addr = vcpu->arch.paddr_accessed; |
| 1332 | run->mmio.len = bytes; |
| 1333 | run->mmio.is_write = 1; |
| 1334 | vcpu->mmio_needed = 1; |
| 1335 | vcpu->mmio_is_write = 1; |
| 1336 | |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1337 | if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) |
| 1338 | val = dp_to_sp(val); |
| 1339 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1340 | /* Store the value at the lowest bytes in 'data'. */ |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1341 | if (!host_swabbed) { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1342 | switch (bytes) { |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1343 | case 8: *(u64 *)data = val; break; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1344 | case 4: *(u32 *)data = val; break; |
| 1345 | case 2: *(u16 *)data = val; break; |
| 1346 | case 1: *(u8 *)data = val; break; |
| 1347 | } |
| 1348 | } else { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1349 | switch (bytes) { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1350 | case 8: *(u64 *)data = swab64(val); break; |
| 1351 | case 4: *(u32 *)data = swab32(val); break; |
| 1352 | case 2: *(u16 *)data = swab16(val); break; |
| 1353 | case 1: *(u8 *)data = val; break; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1354 | } |
| 1355 | } |
| 1356 | |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1357 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 1358 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 1359 | ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1360 | bytes, &run->mmio.data); |
| 1361 | |
| 1362 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 1363 | |
| 1364 | if (!ret) { |
Alexander Graf | 0e673fb | 2012-10-09 00:06:20 +0200 | [diff] [blame] | 1365 | vcpu->mmio_needed = 0; |
| 1366 | return EMULATE_DONE; |
| 1367 | } |
| 1368 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1369 | return EMULATE_DO_MMIO; |
| 1370 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 1371 | EXPORT_SYMBOL_GPL(kvmppc_handle_store); |
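Both the load and the store path key the byte swap off host_swabbed, so run->mmio.data always carries the bytes in the order the device would see them, regardless of the guest's current endianness. A minimal sketch of the 4-byte swap, with a hypothetical helper name but equivalent in effect to the kernel's swab32():

```c
#include <stdint.h>

/* Illustrative 32-bit byte swap: 0x11223344 becomes 0x44332211, which is
 * what a cross-endian guest store of 0x11223344 places in run->mmio.data. */
static uint32_t swab32_sketch(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}
```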
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1372 | |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1373 | #ifdef CONFIG_VSX |
| 1374 | static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) |
| 1375 | { |
| 1376 | u32 dword_offset, word_offset; |
| 1377 | union kvmppc_one_reg reg; |
| 1378 | int vsx_offset = 0; |
Simon Guo | da2a32b | 2018-05-21 13:24:25 +0800 | [diff] [blame] | 1379 | int copy_type = vcpu->arch.mmio_copy_type; |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1380 | int result = 0; |
| 1381 | |
| 1382 | switch (copy_type) { |
| 1383 | case KVMPPC_VSX_COPY_DWORD: |
| 1384 | vsx_offset = |
| 1385 | kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); |
| 1386 | |
| 1387 | if (vsx_offset == -1) { |
| 1388 | result = -1; |
| 1389 | break; |
| 1390 | } |
| 1391 | |
Simon Guo | 4eeb855 | 2018-05-28 09:48:26 +0800 | [diff] [blame] | 1392 | if (rs < 32) { |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1393 | *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset); |
| 1394 | } else { |
Simon Guo | 4eeb855 | 2018-05-28 09:48:26 +0800 | [diff] [blame] | 1395 | reg.vval = VCPU_VSX_VR(vcpu, rs - 32); |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1396 | *val = reg.vsxval[vsx_offset]; |
| 1397 | } |
| 1398 | break; |
| 1399 | |
| 1400 | case KVMPPC_VSX_COPY_WORD: |
| 1401 | vsx_offset = |
| 1402 | kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); |
| 1403 | |
| 1404 | if (vsx_offset == -1) { |
| 1405 | result = -1; |
| 1406 | break; |
| 1407 | } |
| 1408 | |
Simon Guo | 4eeb855 | 2018-05-28 09:48:26 +0800 | [diff] [blame] | 1409 | if (rs < 32) { |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1410 | dword_offset = vsx_offset / 2; |
| 1411 | word_offset = vsx_offset % 2; |
| 1412 | reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset); |
| 1413 | *val = reg.vsx32val[word_offset]; |
| 1414 | } else { |
Simon Guo | 4eeb855 | 2018-05-28 09:48:26 +0800 | [diff] [blame] | 1415 | reg.vval = VCPU_VSX_VR(vcpu, rs - 32); |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1416 | *val = reg.vsx32val[vsx_offset]; |
| 1417 | } |
| 1418 | break; |
| 1419 | |
| 1420 | default: |
| 1421 | result = -1; |
| 1422 | break; |
| 1423 | } |
| 1424 | |
| 1425 | return result; |
| 1426 | } |
| 1427 | |
| 1428 | int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1429 | int rs, unsigned int bytes, int is_default_endian) |
| 1430 | { |
| 1431 | u64 val; |
| 1432 | enum emulation_result emulated = EMULATE_DONE; |
| 1433 | |
| 1434 | vcpu->arch.io_gpr = rs; |
| 1435 | |
Paul Mackerras | 9aa6825 | 2017-11-20 19:56:27 +1100 | [diff] [blame] | 1436 | /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */ |
| 1437 | if (vcpu->arch.mmio_vsx_copy_nums > 4) |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1438 | return EMULATE_FAIL; |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1439 | |
| 1440 | while (vcpu->arch.mmio_vsx_copy_nums) { |
| 1441 | if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) |
| 1442 | return EMULATE_FAIL; |
| 1443 | |
| 1444 | emulated = kvmppc_handle_store(run, vcpu, |
| 1445 | val, bytes, is_default_endian); |
| 1446 | |
| 1447 | if (emulated != EMULATE_DONE) |
| 1448 | break; |
| 1449 | |
| 1450 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1451 | |
| 1452 | vcpu->arch.mmio_vsx_copy_nums--; |
| 1453 | vcpu->arch.mmio_vsx_offset++; |
| 1454 | } |
| 1455 | |
| 1456 | return emulated; |
| 1457 | } |
| 1458 | |
| 1459 | static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, |
| 1460 | struct kvm_run *run) |
| 1461 | { |
| 1462 | enum emulation_result emulated = EMULATE_FAIL; |
| 1463 | int r; |
| 1464 | |
| 1465 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1466 | |
| 1467 | if (!vcpu->mmio_is_write) { |
| 1468 | emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr, |
| 1469 | run->mmio.len, 1, vcpu->arch.mmio_sign_extend); |
| 1470 | } else { |
| 1471 | emulated = kvmppc_handle_vsx_store(run, vcpu, |
| 1472 | vcpu->arch.io_gpr, run->mmio.len, 1); |
| 1473 | } |
| 1474 | |
| 1475 | switch (emulated) { |
| 1476 | case EMULATE_DO_MMIO: |
| 1477 | run->exit_reason = KVM_EXIT_MMIO; |
| 1478 | r = RESUME_HOST; |
| 1479 | break; |
| 1480 | case EMULATE_FAIL: |
| 1481 | pr_info("KVM: MMIO emulation failed (VSX repeat)\n"); |
| 1482 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
| 1483 | run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; |
| 1484 | r = RESUME_HOST; |
| 1485 | break; |
| 1486 | default: |
| 1487 | r = RESUME_GUEST; |
| 1488 | break; |
| 1489 | } |
| 1490 | return r; |
| 1491 | } |
| 1492 | #endif /* CONFIG_VSX */ |
| 1493 | |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1494 | #ifdef CONFIG_ALTIVEC |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1495 | int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1496 | unsigned int rt, unsigned int bytes, int is_default_endian) |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1497 | { |
Paul Mackerras | 6df3877 | 2018-02-13 15:45:21 +1100 | [diff] [blame] | 1498 | enum emulation_result emulated = EMULATE_DONE; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1499 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1500 | if (vcpu->arch.mmio_vmx_copy_nums > 2) |
| 1501 | return EMULATE_FAIL; |
| 1502 | |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1503 | while (vcpu->arch.mmio_vmx_copy_nums) { |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1504 | emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1505 | is_default_endian, 0); |
| 1506 | |
| 1507 | if (emulated != EMULATE_DONE) |
| 1508 | break; |
| 1509 | |
| 1510 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1511 | vcpu->arch.mmio_vmx_copy_nums--; |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1512 | vcpu->arch.mmio_vmx_offset++; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1513 | } |
| 1514 | |
| 1515 | return emulated; |
| 1516 | } |
| 1517 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1518 | int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1519 | { |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1520 | union kvmppc_one_reg reg; |
| 1521 | int vmx_offset = 0; |
| 1522 | int result = 0; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1523 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1524 | vmx_offset = |
| 1525 | kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); |
| 1526 | |
| 1527 | if (vmx_offset == -1) |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1528 | return -1; |
| 1529 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1530 | reg.vval = VCPU_VSX_VR(vcpu, index); |
| 1531 | *val = reg.vsxval[vmx_offset]; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1532 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1533 | return result; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1534 | } |
| 1535 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1536 | int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) |
| 1537 | { |
| 1538 | union kvmppc_one_reg reg; |
| 1539 | int vmx_offset = 0; |
| 1540 | int result = 0; |
| 1541 | |
| 1542 | vmx_offset = |
| 1543 | kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); |
| 1544 | |
| 1545 | if (vmx_offset == -1) |
| 1546 | return -1; |
| 1547 | |
| 1548 | reg.vval = VCPU_VSX_VR(vcpu, index); |
| 1549 | *val = reg.vsx32val[vmx_offset]; |
| 1550 | |
| 1551 | return result; |
| 1552 | } |
| 1553 | |
| 1554 | int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) |
| 1555 | { |
| 1556 | union kvmppc_one_reg reg; |
| 1557 | int vmx_offset = 0; |
| 1558 | int result = 0; |
| 1559 | |
| 1560 | vmx_offset = |
| 1561 | kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); |
| 1562 | |
| 1563 | if (vmx_offset == -1) |
| 1564 | return -1; |
| 1565 | |
| 1566 | reg.vval = VCPU_VSX_VR(vcpu, index); |
| 1567 | *val = reg.vsx16val[vmx_offset]; |
| 1568 | |
| 1569 | return result; |
| 1570 | } |
| 1571 | |
| 1572 | int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) |
| 1573 | { |
| 1574 | union kvmppc_one_reg reg; |
| 1575 | int vmx_offset = 0; |
| 1576 | int result = 0; |
| 1577 | |
| 1578 | vmx_offset = |
| 1579 | kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); |
| 1580 | |
| 1581 | if (vmx_offset == -1) |
| 1582 | return -1; |
| 1583 | |
| 1584 | reg.vval = VCPU_VSX_VR(vcpu, index); |
| 1585 | *val = reg.vsx8val[vmx_offset]; |
| 1586 | |
| 1587 | return result; |
| 1588 | } |
| 1589 | |
| 1590 | int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1591 | unsigned int rs, unsigned int bytes, int is_default_endian) |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1592 | { |
| 1593 | u64 val = 0; |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1594 | unsigned int index = rs & KVM_MMIO_REG_MASK; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1595 | enum emulation_result emulated = EMULATE_DONE; |
| 1596 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1597 | if (vcpu->arch.mmio_vmx_copy_nums > 2) |
| 1598 | return EMULATE_FAIL; |
| 1599 | |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1600 | vcpu->arch.io_gpr = rs; |
| 1601 | |
| 1602 | while (vcpu->arch.mmio_vmx_copy_nums) { |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1603 | switch (vcpu->arch.mmio_copy_type) { |
| 1604 | case KVMPPC_VMX_COPY_DWORD: |
| 1605 | if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) |
| 1606 | return EMULATE_FAIL; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1607 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1608 | break; |
| 1609 | case KVMPPC_VMX_COPY_WORD: |
| 1610 | if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) |
| 1611 | return EMULATE_FAIL; |
| 1612 | break; |
| 1613 | case KVMPPC_VMX_COPY_HWORD: |
| 1614 | if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) |
| 1615 | return EMULATE_FAIL; |
| 1616 | break; |
| 1617 | case KVMPPC_VMX_COPY_BYTE: |
| 1618 | if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) |
| 1619 | return EMULATE_FAIL; |
| 1620 | break; |
| 1621 | default: |
| 1622 | return EMULATE_FAIL; |
| 1623 | } |
| 1624 | |
| 1625 | emulated = kvmppc_handle_store(run, vcpu, val, bytes, |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1626 | is_default_endian); |
| 1627 | if (emulated != EMULATE_DONE) |
| 1628 | break; |
| 1629 | |
| 1630 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1631 | vcpu->arch.mmio_vmx_copy_nums--; |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1632 | vcpu->arch.mmio_vmx_offset++; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1633 | } |
| 1634 | |
| 1635 | return emulated; |
| 1636 | } |
| 1637 | |
| 1638 | static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu, |
| 1639 | struct kvm_run *run) |
| 1640 | { |
| 1641 | enum emulation_result emulated = EMULATE_FAIL; |
| 1642 | int r; |
| 1643 | |
| 1644 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1645 | |
| 1646 | if (!vcpu->mmio_is_write) { |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1647 | emulated = kvmppc_handle_vmx_load(run, vcpu, |
| 1648 | vcpu->arch.io_gpr, run->mmio.len, 1); |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1649 | } else { |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1650 | emulated = kvmppc_handle_vmx_store(run, vcpu, |
| 1651 | vcpu->arch.io_gpr, run->mmio.len, 1); |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1652 | } |
| 1653 | |
| 1654 | switch (emulated) { |
| 1655 | case EMULATE_DO_MMIO: |
| 1656 | run->exit_reason = KVM_EXIT_MMIO; |
| 1657 | r = RESUME_HOST; |
| 1658 | break; |
| 1659 | case EMULATE_FAIL: |
| 1660 | pr_info("KVM: MMIO emulation failed (VMX repeat)\n"); |
| 1661 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
| 1662 | run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; |
| 1663 | r = RESUME_HOST; |
| 1664 | break; |
| 1665 | default: |
| 1666 | r = RESUME_GUEST; |
| 1667 | break; |
| 1668 | } |
| 1669 | return r; |
| 1670 | } |
| 1671 | #endif /* CONFIG_ALTIVEC */ |
| 1672 | |
Mihai Caraman | 8a41ea5 | 2014-08-20 16:36:24 +0300 | [diff] [blame] | 1673 | int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) |
| 1674 | { |
| 1675 | int r = 0; |
| 1676 | union kvmppc_one_reg val; |
| 1677 | int size; |
| 1678 | |
| 1679 | size = one_reg_size(reg->id); |
| 1680 | if (size > sizeof(val)) |
| 1681 | return -EINVAL; |
| 1682 | |
| 1683 | r = kvmppc_get_one_reg(vcpu, reg->id, &val); |
| 1684 | if (r == -EINVAL) { |
| 1685 | r = 0; |
| 1686 | switch (reg->id) { |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1687 | #ifdef CONFIG_ALTIVEC |
| 1688 | case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: |
| 1689 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1690 | r = -ENXIO; |
| 1691 | break; |
| 1692 | } |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1693 | val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1694 | break; |
| 1695 | case KVM_REG_PPC_VSCR: |
| 1696 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1697 | r = -ENXIO; |
| 1698 | break; |
| 1699 | } |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1700 | val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1701 | break; |
| 1702 | case KVM_REG_PPC_VRSAVE: |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1703 | val = get_reg_val(reg->id, vcpu->arch.vrsave); |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1704 | break; |
| 1705 | #endif /* CONFIG_ALTIVEC */ |
Mihai Caraman | 8a41ea5 | 2014-08-20 16:36:24 +0300 | [diff] [blame] | 1706 | default: |
| 1707 | r = -EINVAL; |
| 1708 | break; |
| 1709 | } |
| 1710 | } |
| 1711 | |
| 1712 | if (r) |
| 1713 | return r; |
| 1714 | |
| 1715 | if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size)) |
| 1716 | r = -EFAULT; |
| 1717 | |
| 1718 | return r; |
| 1719 | } |
| 1720 | |
| 1721 | int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) |
| 1722 | { |
| 1723 | int r; |
| 1724 | union kvmppc_one_reg val; |
| 1725 | int size; |
| 1726 | |
| 1727 | size = one_reg_size(reg->id); |
| 1728 | if (size > sizeof(val)) |
| 1729 | return -EINVAL; |
| 1730 | |
| 1731 | if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size)) |
| 1732 | return -EFAULT; |
| 1733 | |
| 1734 | r = kvmppc_set_one_reg(vcpu, reg->id, &val); |
| 1735 | if (r == -EINVAL) { |
| 1736 | r = 0; |
| 1737 | switch (reg->id) { |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1738 | #ifdef CONFIG_ALTIVEC |
| 1739 | case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: |
| 1740 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1741 | r = -ENXIO; |
| 1742 | break; |
| 1743 | } |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1744 | vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1745 | break; |
| 1746 | case KVM_REG_PPC_VSCR: |
| 1747 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1748 | r = -ENXIO; |
| 1749 | break; |
| 1750 | } |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1751 | vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1752 | break; |
| 1753 | case KVM_REG_PPC_VRSAVE: |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1754 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1755 | r = -ENXIO; |
| 1756 | break; |
| 1757 | } |
| 1758 | vcpu->arch.vrsave = set_reg_val(reg->id, val); |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1759 | break; |
| 1760 | #endif /* CONFIG_ALTIVEC */ |
Mihai Caraman | 8a41ea5 | 2014-08-20 16:36:24 +0300 | [diff] [blame] | 1761 | default: |
| 1762 | r = -EINVAL; |
| 1763 | break; |
| 1764 | } |
| 1765 | } |
| 1766 | |
| 1767 | return r; |
| 1768 | } |
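kvm_vcpu_ioctl_get_one_reg()/set_one_reg() first let the CPU-specific backend claim the register and only handle the generic AltiVec registers themselves; the transfer size is encoded in the register id. A hedged user-space sketch that reads VRSAVE through the same interface (the helper name is hypothetical; it assumes a vcpu fd from KVM_CREATE_VCPU on a powerpc host whose uapi headers provide KVM_REG_PPC_VRSAVE):

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: fetch the 32-bit VRSAVE register of one vcpu via KVM_GET_ONE_REG. */
static int get_vrsave(int vcpu_fd, uint32_t *out)
{
	uint32_t val;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_VRSAVE,	/* a KVM_REG_SIZE_U32 register */
		.addr = (uintptr_t)&val,
	};
	int ret = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);

	if (!ret)
		*out = val;
	return ret;
}
```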
| 1769 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1770 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) |
| 1771 | { |
| 1772 | int r; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1773 | |
Christoffer Dall | accb757 | 2017-12-04 21:35:25 +0100 | [diff] [blame] | 1774 | vcpu_load(vcpu); |
| 1775 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1776 | if (vcpu->mmio_needed) { |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1777 | vcpu->mmio_needed = 0; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1778 | if (!vcpu->mmio_is_write) |
| 1779 | kvmppc_complete_mmio_load(vcpu, run); |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1780 | #ifdef CONFIG_VSX |
| 1781 | if (vcpu->arch.mmio_vsx_copy_nums > 0) { |
| 1782 | vcpu->arch.mmio_vsx_copy_nums--; |
| 1783 | vcpu->arch.mmio_vsx_offset++; |
| 1784 | } |
| 1785 | |
| 1786 | if (vcpu->arch.mmio_vsx_copy_nums > 0) { |
| 1787 | r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run); |
| 1788 | if (r == RESUME_HOST) { |
| 1789 | vcpu->mmio_needed = 1; |
Christoffer Dall | accb757 | 2017-12-04 21:35:25 +0100 | [diff] [blame] | 1790 | goto out; |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1791 | } |
| 1792 | } |
| 1793 | #endif |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1794 | #ifdef CONFIG_ALTIVEC |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1795 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1796 | vcpu->arch.mmio_vmx_copy_nums--; |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1797 | vcpu->arch.mmio_vmx_offset++; |
| 1798 | } |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1799 | |
| 1800 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { |
| 1801 | r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); |
| 1802 | if (r == RESUME_HOST) { |
| 1803 | vcpu->mmio_needed = 1; |
Radim Krčmář | 1ab03c0 | 2018-02-09 21:36:57 +0100 | [diff] [blame] | 1804 | goto out; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1805 | } |
| 1806 | } |
| 1807 | #endif |
Alexander Graf | ad0a048 | 2010-03-24 21:48:30 +0100 | [diff] [blame] | 1808 | } else if (vcpu->arch.osi_needed) { |
| 1809 | u64 *gprs = run->osi.gprs; |
| 1810 | int i; |
| 1811 | |
| 1812 | for (i = 0; i < 32; i++) |
| 1813 | kvmppc_set_gpr(vcpu, i, gprs[i]); |
| 1814 | vcpu->arch.osi_needed = 0; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1815 | } else if (vcpu->arch.hcall_needed) { |
| 1816 | int i; |
| 1817 | |
| 1818 | kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); |
| 1819 | for (i = 0; i < 9; ++i) |
| 1820 | kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); |
| 1821 | vcpu->arch.hcall_needed = 0; |
Alexander Graf | 1c81063 | 2013-01-04 18:12:48 +0100 | [diff] [blame] | 1822 | #ifdef CONFIG_BOOKE |
| 1823 | } else if (vcpu->arch.epr_needed) { |
| 1824 | kvmppc_set_epr(vcpu, run->epr.epr); |
| 1825 | vcpu->arch.epr_needed = 0; |
| 1826 | #endif |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1827 | } |
| 1828 | |
Jan H. Schönherr | 20b7035 | 2017-11-24 22:39:01 +0100 | [diff] [blame] | 1829 | kvm_sigset_activate(vcpu); |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1830 | |
Paolo Bonzini | 460df4c | 2017-02-08 11:50:15 +0100 | [diff] [blame] | 1831 | if (run->immediate_exit) |
| 1832 | r = -EINTR; |
| 1833 | else |
| 1834 | r = kvmppc_vcpu_run(run, vcpu); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1835 | |
Jan H. Schönherr | 20b7035 | 2017-11-24 22:39:01 +0100 | [diff] [blame] | 1836 | kvm_sigset_deactivate(vcpu); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1837 | |
Paul Mackerras | c662f77 | 2018-02-13 15:16:01 +1100 | [diff] [blame] | 1838 | #ifdef CONFIG_ALTIVEC |
Christoffer Dall | accb757 | 2017-12-04 21:35:25 +0100 | [diff] [blame] | 1839 | out: |
Paul Mackerras | c662f77 | 2018-02-13 15:16:01 +1100 | [diff] [blame] | 1840 | #endif |
Christoffer Dall | accb757 | 2017-12-04 21:35:25 +0100 | [diff] [blame] | 1841 | vcpu_put(vcpu); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1842 | return r; |
| 1843 | } |
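Because kvm_arch_vcpu_ioctl_run() completes any pending MMIO, OSI, hcall or EPR state before re-entering the guest, the user-space side of an MMIO access reduces to: handle the KVM_EXIT_MMIO exit, fill in or consume run->mmio.data, and call KVM_RUN again. A hedged sketch of that loop; device_read()/device_write() and run_vcpu() are hypothetical device-model hooks, not KVM interfaces:

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical device-model hooks, not part of KVM. */
void device_read(uint64_t addr, void *data, unsigned int len);
void device_write(uint64_t addr, const void *data, unsigned int len);

static void run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		ioctl(vcpu_fd, KVM_RUN, 0);

		if (run->exit_reason != KVM_EXIT_MMIO)
			break;	/* a real VMM handles the other exit reasons too */

		if (run->mmio.is_write)
			device_write(run->mmio.phys_addr, run->mmio.data,
				     run->mmio.len);
		else
			device_read(run->mmio.phys_addr, run->mmio.data,
				    run->mmio.len);
		/* Re-entering KVM_RUN lets kvmppc_complete_mmio_load() above
		 * place the value read from the device into the guest register. */
	}
}
```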
| 1844 | |
| 1845 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) |
| 1846 | { |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1847 | if (irq->irq == KVM_INTERRUPT_UNSET) { |
Paul Mackerras | 4fe27d2 | 2013-02-14 14:00:25 +0000 | [diff] [blame] | 1848 | kvmppc_core_dequeue_external(vcpu); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1849 | return 0; |
| 1850 | } |
Hollis Blanchard | 45c5eb6 | 2008-04-25 17:55:49 -0500 | [diff] [blame] | 1851 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1852 | kvmppc_core_queue_external(vcpu, irq); |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 1853 | |
Scott Wood | dfd4d47 | 2011-11-17 12:39:59 +0000 | [diff] [blame] | 1854 | kvm_vcpu_kick(vcpu); |
Hollis Blanchard | 45c5eb6 | 2008-04-25 17:55:49 -0500 | [diff] [blame] | 1855 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1856 | return 0; |
| 1857 | } |
| 1858 | |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 1859 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, |
| 1860 | struct kvm_enable_cap *cap) |
| 1861 | { |
| 1862 | int r; |
| 1863 | |
| 1864 | if (cap->flags) |
| 1865 | return -EINVAL; |
| 1866 | |
| 1867 | switch (cap->cap) { |
Alexander Graf | ad0a048 | 2010-03-24 21:48:30 +0100 | [diff] [blame] | 1868 | case KVM_CAP_PPC_OSI: |
| 1869 | r = 0; |
| 1870 | vcpu->arch.osi_enabled = true; |
| 1871 | break; |
Alexander Graf | 930b412 | 2011-08-08 17:29:42 +0200 | [diff] [blame] | 1872 | case KVM_CAP_PPC_PAPR: |
| 1873 | r = 0; |
| 1874 | vcpu->arch.papr_enabled = true; |
| 1875 | break; |
Alexander Graf | 1c81063 | 2013-01-04 18:12:48 +0100 | [diff] [blame] | 1876 | case KVM_CAP_PPC_EPR: |
| 1877 | r = 0; |
Scott Wood | 5df554ad | 2013-04-12 14:08:46 +0000 | [diff] [blame] | 1878 | if (cap->args[0]) |
| 1879 | vcpu->arch.epr_flags |= KVMPPC_EPR_USER; |
| 1880 | else |
| 1881 | vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; |
Alexander Graf | 1c81063 | 2013-01-04 18:12:48 +0100 | [diff] [blame] | 1882 | break; |
Bharat Bhushan | f61c94b | 2012-08-08 20:38:19 +0000 | [diff] [blame] | 1883 | #ifdef CONFIG_BOOKE |
| 1884 | case KVM_CAP_PPC_BOOKE_WATCHDOG: |
| 1885 | r = 0; |
| 1886 | vcpu->arch.watchdog_enabled = true; |
| 1887 | break; |
| 1888 | #endif |
Alexander Graf | bf7ca4b | 2012-02-15 23:40:00 +0000 | [diff] [blame] | 1889 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
Scott Wood | dc83b8b | 2011-08-18 15:25:21 -0500 | [diff] [blame] | 1890 | case KVM_CAP_SW_TLB: { |
| 1891 | struct kvm_config_tlb cfg; |
| 1892 | void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; |
| 1893 | |
| 1894 | r = -EFAULT; |
| 1895 | if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) |
| 1896 | break; |
| 1897 | |
| 1898 | r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); |
| 1899 | break; |
| 1900 | } |
| 1901 | #endif |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1902 | #ifdef CONFIG_KVM_MPIC |
| 1903 | case KVM_CAP_IRQ_MPIC: { |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1904 | struct fd f; |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1905 | struct kvm_device *dev; |
| 1906 | |
| 1907 | r = -EBADF; |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1908 | f = fdget(cap->args[0]); |
| 1909 | if (!f.file) |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1910 | break; |
| 1911 | |
| 1912 | r = -EPERM; |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1913 | dev = kvm_device_from_filp(f.file); |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1914 | if (dev) |
| 1915 | r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); |
| 1916 | |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1917 | fdput(f); |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1918 | break; |
| 1919 | } |
| 1920 | #endif |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1921 | #ifdef CONFIG_KVM_XICS |
| 1922 | case KVM_CAP_IRQ_XICS: { |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1923 | struct fd f; |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1924 | struct kvm_device *dev; |
| 1925 | |
| 1926 | r = -EBADF; |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1927 | f = fdget(cap->args[0]); |
| 1928 | if (!f.file) |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1929 | break; |
| 1930 | |
| 1931 | r = -EPERM; |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1932 | dev = kvm_device_from_filp(f.file); |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1933 | if (dev) { |
Paul Mackerras | 03f9533 | 2019-02-04 22:07:20 +1100 | [diff] [blame^] | 1934 | if (xics_on_xive()) |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1935 | r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); |
| 1936 | else |
| 1937 | r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); |
| 1938 | } |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1939 | |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1940 | fdput(f); |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1941 | break; |
| 1942 | } |
| 1943 | #endif /* CONFIG_KVM_XICS */ |
Aravinda Prasad | 134764e | 2017-05-11 16:32:48 +0530 | [diff] [blame] | 1944 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
| 1945 | case KVM_CAP_PPC_FWNMI: |
| 1946 | r = -EINVAL; |
| 1947 | if (!is_kvmppc_hv_enabled(vcpu->kvm)) |
| 1948 | break; |
| 1949 | r = 0; |
| 1950 | vcpu->kvm->arch.fwnmi_enabled = true; |
| 1951 | break; |
| 1952 | #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 1953 | default: |
| 1954 | r = -EINVAL; |
| 1955 | break; |
| 1956 | } |
| 1957 | |
Alexander Graf | af8f38b | 2011-08-10 13:57:08 +0200 | [diff] [blame] | 1958 | if (!r) |
| 1959 | r = kvmppc_sanity_check(vcpu); |
| 1960 | |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 1961 | return r; |
| 1962 | } |
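Each capability here is switched on through the generic KVM_ENABLE_CAP vcpu ioctl, and kvmppc_sanity_check() runs afterwards so an impossible combination is rejected. A minimal user-space sketch enabling the OSI hypercall interface (hypothetical helper name; this capability takes no flags or arguments):

```c
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: enable the OSI hypercall interface on one vcpu. */
static int enable_osi(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_PPC_OSI;
	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}
```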
| 1963 | |
Paul Mackerras | 34a75b0 | 2016-08-10 11:27:27 +1000 | [diff] [blame] | 1964 | bool kvm_arch_intc_initialized(struct kvm *kvm) |
| 1965 | { |
| 1966 | #ifdef CONFIG_KVM_MPIC |
| 1967 | if (kvm->arch.mpic) |
| 1968 | return true; |
| 1969 | #endif |
| 1970 | #ifdef CONFIG_KVM_XICS |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1971 | if (kvm->arch.xics || kvm->arch.xive) |
Paul Mackerras | 34a75b0 | 2016-08-10 11:27:27 +1000 | [diff] [blame] | 1972 | return true; |
| 1973 | #endif |
| 1974 | return false; |
| 1975 | } |
| 1976 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1977 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
| 1978 | struct kvm_mp_state *mp_state) |
| 1979 | { |
| 1980 | return -EINVAL; |
| 1981 | } |
| 1982 | |
| 1983 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
| 1984 | struct kvm_mp_state *mp_state) |
| 1985 | { |
| 1986 | return -EINVAL; |
| 1987 | } |
| 1988 | |
Paolo Bonzini | 5cb0944 | 2017-12-12 17:41:34 +0100 | [diff] [blame] | 1989 | long kvm_arch_vcpu_async_ioctl(struct file *filp, |
| 1990 | unsigned int ioctl, unsigned long arg) |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1991 | { |
| 1992 | struct kvm_vcpu *vcpu = filp->private_data; |
| 1993 | void __user *argp = (void __user *)arg; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1994 | |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1995 | if (ioctl == KVM_INTERRUPT) { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1996 | struct kvm_interrupt irq; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1997 | if (copy_from_user(&irq, argp, sizeof(irq))) |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1998 | return -EFAULT; |
| 1999 | return kvm_vcpu_ioctl_interrupt(vcpu, &irq); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2000 | } |
Paolo Bonzini | 5cb0944 | 2017-12-12 17:41:34 +0100 | [diff] [blame] | 2001 | return -ENOIOCTLCMD; |
| 2002 | } |
| 2003 | |
| 2004 | long kvm_arch_vcpu_ioctl(struct file *filp, |
| 2005 | unsigned int ioctl, unsigned long arg) |
| 2006 | { |
| 2007 | struct kvm_vcpu *vcpu = filp->private_data; |
| 2008 | void __user *argp = (void __user *)arg; |
| 2009 | long r; |
Avi Kivity | 19483d1 | 2010-05-13 12:30:43 +0300 | [diff] [blame] | 2010 | |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 2011 | switch (ioctl) { |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 2012 | case KVM_ENABLE_CAP: |
| 2013 | { |
| 2014 | struct kvm_enable_cap cap; |
| 2015 | r = -EFAULT; |
Simon Guo | b3cebfe | 2018-05-23 15:02:09 +0800 | [diff] [blame] | 2016 | vcpu_load(vcpu); |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 2017 | if (copy_from_user(&cap, argp, sizeof(cap))) |
| 2018 | goto out; |
| 2019 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); |
Simon Guo | b3cebfe | 2018-05-23 15:02:09 +0800 | [diff] [blame] | 2020 | vcpu_put(vcpu); |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 2021 | break; |
| 2022 | } |
Scott Wood | dc83b8b | 2011-08-18 15:25:21 -0500 | [diff] [blame] | 2023 | |
Alexander Graf | e24ed81 | 2011-09-14 10:02:41 +0200 | [diff] [blame] | 2024 | case KVM_SET_ONE_REG: |
| 2025 | case KVM_GET_ONE_REG: |
| 2026 | { |
| 2027 | struct kvm_one_reg reg; |
| 2028 | r = -EFAULT; |
| 2029 | if (copy_from_user(®, argp, sizeof(reg))) |
| 2030 | goto out; |
| 2031 | if (ioctl == KVM_SET_ONE_REG) |
| 2032 | r = kvm_vcpu_ioctl_set_one_reg(vcpu, ®); |
| 2033 | else |
| 2034 | r = kvm_vcpu_ioctl_get_one_reg(vcpu, ®); |
| 2035 | break; |
| 2036 | } |
| 2037 | |
Alexander Graf | bf7ca4b | 2012-02-15 23:40:00 +0000 | [diff] [blame] | 2038 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
Scott Wood | dc83b8b | 2011-08-18 15:25:21 -0500 | [diff] [blame] | 2039 | case KVM_DIRTY_TLB: { |
| 2040 | struct kvm_dirty_tlb dirty; |
| 2041 | r = -EFAULT; |
Simon Guo | b3cebfe | 2018-05-23 15:02:09 +0800 | [diff] [blame] | 2042 | vcpu_load(vcpu); |
Scott Wood | dc83b8b | 2011-08-18 15:25:21 -0500 | [diff] [blame] | 2043 | if (copy_from_user(&dirty, argp, sizeof(dirty))) |
| 2044 | goto out; |
| 2045 | r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); |
Simon Guo | b3cebfe | 2018-05-23 15:02:09 +0800 | [diff] [blame] | 2046 | vcpu_put(vcpu); |
Scott Wood | dc83b8b | 2011-08-18 15:25:21 -0500 | [diff] [blame] | 2047 | break; |
| 2048 | } |
| 2049 | #endif |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2050 | default: |
| 2051 | r = -EINVAL; |
| 2052 | } |
| 2053 | |
| 2054 | out: |
| 2055 | return r; |
| 2056 | } |
| 2057 | |
Souptick Joarder | 1499fa8 | 2018-04-19 00:49:58 +0530 | [diff] [blame] | 2058 | vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) |
Carsten Otte | 5b1c149 | 2012-01-04 10:25:23 +0100 | [diff] [blame] | 2059 | { |
| 2060 | return VM_FAULT_SIGBUS; |
| 2061 | } |
| 2062 | |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2063 | static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) |
| 2064 | { |
Stuart Yoder | 784bafa | 2012-07-03 05:48:51 +0000 | [diff] [blame] | 2065 | u32 inst_nop = 0x60000000; |
| 2066 | #ifdef CONFIG_KVM_BOOKE_HV |
| 2067 | u32 inst_sc1 = 0x44000022; |
Alexander Graf | 2743103 | 2014-04-24 13:39:16 +0200 | [diff] [blame] | 2068 | pvinfo->hcall[0] = cpu_to_be32(inst_sc1); |
| 2069 | pvinfo->hcall[1] = cpu_to_be32(inst_nop); |
| 2070 | pvinfo->hcall[2] = cpu_to_be32(inst_nop); |
| 2071 | pvinfo->hcall[3] = cpu_to_be32(inst_nop); |
Stuart Yoder | 784bafa | 2012-07-03 05:48:51 +0000 | [diff] [blame] | 2072 | #else |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2073 | u32 inst_lis = 0x3c000000; |
| 2074 | u32 inst_ori = 0x60000000; |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2075 | u32 inst_sc = 0x44000002; |
| 2076 | u32 inst_imm_mask = 0xffff; |
| 2077 | |
| 2078 | /* |
| 2079 | * The hypercall to get into KVM from within guest context is as |
| 2080 | * follows: |
| 2081 | * |
| 2082 | * lis r0, KVM_SC_MAGIC_R0@h |
| 2083 | * ori r0, r0, KVM_SC_MAGIC_R0@l |
| 2084 | * sc |
| 2085 | * nop |
| 2086 | */ |
Alexander Graf | 2743103 | 2014-04-24 13:39:16 +0200 | [diff] [blame] | 2087 | pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask)); |
| 2088 | pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask)); |
| 2089 | pvinfo->hcall[2] = cpu_to_be32(inst_sc); |
| 2090 | pvinfo->hcall[3] = cpu_to_be32(inst_nop); |
Stuart Yoder | 784bafa | 2012-07-03 05:48:51 +0000 | [diff] [blame] | 2091 | #endif |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2092 | |
Liu Yu-B13201 | 9202e07 | 2012-07-03 05:48:52 +0000 | [diff] [blame] | 2093 | pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; |
| 2094 | |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2095 | return 0; |
| 2096 | } |
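The four words filled in above are meant to be patched verbatim into the guest: on non-BOOKE-HV hosts they load the KVM magic value into r0 and issue sc, while BOOKE-HV hosts need only a single sc 1. A hedged user-space sketch that fetches and dumps the sequence via the KVM_PPC_GET_PVINFO vm ioctl (the helper name is hypothetical):

```c
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: read the patchable hypercall sequence for this VM and print it. */
static int dump_pvinfo(int vm_fd)
{
	struct kvm_ppc_pvinfo pv;
	int i, ret;

	ret = ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pv);
	if (ret < 0)
		return ret;

	for (i = 0; i < 4; i++)
		printf("hcall[%d] = 0x%08x\n", i, pv.hcall[i]);
	return 0;
}
```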
| 2097 | |
Alexander Graf | 5efdb4b | 2013-04-17 00:37:57 +0200 | [diff] [blame] | 2098 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, |
| 2099 | bool line_status) |
| 2100 | { |
| 2101 | if (!irqchip_in_kernel(kvm)) |
| 2102 | return -ENXIO; |
| 2103 | |
| 2104 | irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, |
| 2105 | irq_event->irq, irq_event->level, |
| 2106 | line_status); |
| 2107 | return 0; |
| 2108 | } |
| 2109 | |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2110 | |
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 2111 | int kvm_vm_ioctl_enable_cap(struct kvm *kvm, |
| 2112 | struct kvm_enable_cap *cap) |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2113 | { |
| 2114 | int r; |
| 2115 | |
| 2116 | if (cap->flags) |
| 2117 | return -EINVAL; |
| 2118 | |
| 2119 | switch (cap->cap) { |
| 2120 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER |
| 2121 | case KVM_CAP_PPC_ENABLE_HCALL: { |
| 2122 | unsigned long hcall = cap->args[0]; |
| 2123 | |
| 2124 | r = -EINVAL; |
| 2125 | if (hcall > MAX_HCALL_OPCODE || (hcall & 3) || |
| 2126 | cap->args[1] > 1) |
| 2127 | break; |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 2128 | if (!kvmppc_book3s_hcall_implemented(kvm, hcall)) |
| 2129 | break; |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2130 | if (cap->args[1]) |
| 2131 | set_bit(hcall / 4, kvm->arch.enabled_hcalls); |
| 2132 | else |
| 2133 | clear_bit(hcall / 4, kvm->arch.enabled_hcalls); |
| 2134 | r = 0; |
| 2135 | break; |
| 2136 | } |
Paul Mackerras | 3c31352 | 2017-02-06 13:24:41 +1100 | [diff] [blame] | 2137 | case KVM_CAP_PPC_SMT: { |
| 2138 | unsigned long mode = cap->args[0]; |
| 2139 | unsigned long flags = cap->args[1]; |
| 2140 | |
| 2141 | r = -EINVAL; |
| 2142 | if (kvm->arch.kvm_ops->set_smt_mode) |
| 2143 | r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); |
| 2144 | break; |
| 2145 | } |
Paul Mackerras | aa069a9 | 2018-09-21 20:02:01 +1000 | [diff] [blame] | 2146 | |
| 2147 | case KVM_CAP_PPC_NESTED_HV: |
| 2148 | r = -EINVAL; |
| 2149 | if (!is_kvmppc_hv_enabled(kvm) || |
| 2150 | !kvm->arch.kvm_ops->enable_nested) |
| 2151 | break; |
| 2152 | r = kvm->arch.kvm_ops->enable_nested(kvm); |
| 2153 | break; |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2154 | #endif |
| 2155 | default: |
| 2156 | r = -EINVAL; |
| 2157 | break; |
| 2158 | } |
| 2159 | |
| 2160 | return r; |
| 2161 | } |
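/*
 * Hypothetical userspace sketch (assumes an open VM fd "vmfd", a valid
 * hcall opcode "hcall", and <linux/kvm.h>; not kernel code): a VMM
 * enables a single hcall like this:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_PPC_ENABLE_HCALL,
 *		.args = { hcall, 1 },
 *	};
 *
 *	if (ioctl(vmfd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 *
 * args[0] is the hcall opcode and args[1] is 1 to enable or 0 to
 * disable it, matching the checks above.
 */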
| 2162 | |
Paul Mackerras | 3214d01 | 2018-01-15 16:06:47 +1100 | [diff] [blame] | 2163 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 2164 | /* |
| 2165 | * These functions check whether the underlying hardware is safe |
| 2166 | * against attacks based on observing the effects of speculatively |
| 2167 | * executed instructions, and whether it supplies instructions for |
| 2168 | * use in workarounds. The information comes from firmware, either |
| 2169 | * via the device tree on powernv platforms or from an hcall on |
| 2170 | * pseries platforms. |
| 2171 | */ |
| 2172 | #ifdef CONFIG_PPC_PSERIES |
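/*
 * On pseries, ask the hypervisor via plpar_get_cpu_characteristics()
 * (the H_GET_CPU_CHARACTERISTICS hcall) and report the bits KVM knows
 * about.
 */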
| 2173 | static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp) |
| 2174 | { |
| 2175 | struct h_cpu_char_result c; |
| 2176 | unsigned long rc; |
| 2177 | |
| 2178 | if (!machine_is(pseries)) |
| 2179 | return -ENOTTY; |
| 2180 | |
| 2181 | rc = plpar_get_cpu_characteristics(&c); |
| 2182 | if (rc == H_SUCCESS) { |
| 2183 | cp->character = c.character; |
| 2184 | cp->behaviour = c.behaviour; |
| 2185 | cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | |
| 2186 | KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED | |
| 2187 | KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 | |
| 2188 | KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 | |
| 2189 | KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV | |
| 2190 | KVM_PPC_CPU_CHAR_BR_HINT_HONOURED | |
| 2191 | KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF | |
| 2192 | KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS; |
| 2193 | cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | |
| 2194 | KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR | |
| 2195 | KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR; |
| 2196 | } |
| 2197 | return 0; |
| 2198 | } |
| 2199 | #else |
| 2200 | static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp) |
| 2201 | { |
| 2202 | return -ENOTTY; |
| 2203 | } |
| 2204 | #endif |
| 2205 | |
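/*
 * Test whether the named child of the "fw-features" node carries the
 * given state property ("enabled" or "disabled").
 */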
| 2206 | static inline bool have_fw_feat(struct device_node *fw_features, |
| 2207 | const char *state, const char *name) |
| 2208 | { |
| 2209 | struct device_node *np; |
| 2210 | bool r = false; |
| 2211 | |
| 2212 | np = of_get_child_by_name(fw_features, name); |
| 2213 | if (np) { |
| 2214 | r = of_property_read_bool(np, state); |
| 2215 | of_node_put(np); |
| 2216 | } |
| 2217 | return r; |
| 2218 | } |
| 2219 | |
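/*
 * Fill in *cp for the KVM_PPC_GET_CPU_CHAR ioctl: try the pseries hcall
 * first, then fall back to the ibm,opal/fw-features device-tree nodes
 * on powernv.
 */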
| 2220 | static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp) |
| 2221 | { |
| 2222 | struct device_node *np, *fw_features; |
| 2223 | int r; |
| 2224 | |
| 2225 | memset(cp, 0, sizeof(*cp)); |
| 2226 | r = pseries_get_cpu_char(cp); |
| 2227 | if (r != -ENOTTY) |
| 2228 | return r; |
| 2229 | |
| 2230 | np = of_find_node_by_name(NULL, "ibm,opal"); |
| 2231 | if (np) { |
| 2232 | fw_features = of_get_child_by_name(np, "fw-features"); |
| 2233 | of_node_put(np); |
| 2234 | if (!fw_features) |
| 2235 | return 0; |
| 2236 | if (have_fw_feat(fw_features, "enabled", |
| 2237 | "inst-spec-barrier-ori31,31,0")) |
| 2238 | cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31; |
| 2239 | if (have_fw_feat(fw_features, "enabled", |
| 2240 | "fw-bcctrl-serialized")) |
| 2241 | cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED; |
| 2242 | if (have_fw_feat(fw_features, "enabled", |
| 2243 | "inst-l1d-flush-ori30,30,0")) |
| 2244 | cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30; |
| 2245 | if (have_fw_feat(fw_features, "enabled", |
| 2246 | "inst-l1d-flush-trig2")) |
| 2247 | cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2; |
| 2248 | if (have_fw_feat(fw_features, "enabled", |
| 2249 | "fw-l1d-thread-split")) |
| 2250 | cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV; |
| 2251 | if (have_fw_feat(fw_features, "enabled", |
| 2252 | "fw-count-cache-disabled")) |
| 2253 | cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS; |
| 2254 | cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | |
| 2255 | KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED | |
| 2256 | KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 | |
| 2257 | KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 | |
| 2258 | KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV | |
| 2259 | KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS; |
| 2260 | |
| 2261 | if (have_fw_feat(fw_features, "enabled", |
| 2262 | "speculation-policy-favor-security")) |
| 2263 | cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY; |
| 2264 | if (!have_fw_feat(fw_features, "disabled", |
| 2265 | "needs-l1d-flush-msr-pr-0-to-1")) |
| 2266 | cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR; |
| 2267 | if (!have_fw_feat(fw_features, "disabled", |
| 2268 | "needs-spec-barrier-for-bound-checks")) |
| 2269 | cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR; |
| 2270 | cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | |
| 2271 | KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR | |
| 2272 | KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR; |
| 2273 | |
| 2274 | of_node_put(fw_features); |
| 2275 | } |
| 2276 | |
| 2277 | return 0; |
| 2278 | } |
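/*
 * Hypothetical userspace sketch (assumes an open VM fd "vmfd" and
 * <linux/kvm.h>; not kernel code): a VMM reads these bits with the
 * KVM_PPC_GET_CPU_CHAR vm ioctl:
 *
 *	struct kvm_ppc_cpu_char cc;
 *
 *	if (ioctl(vmfd, KVM_PPC_GET_CPU_CHAR, &cc) < 0)
 *		perror("KVM_PPC_GET_CPU_CHAR");
 *
 * Only bits that are also set in character_mask/behaviour_mask are
 * meaningful; the rest are unknown to the host.
 */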
| 2279 | #endif |
| 2280 | |
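/*
 * Dispatcher for powerpc-specific vm ioctls.  On Book3S-64 anything not
 * handled here falls through to the kvm_ops backend; elsewhere unknown
 * ioctls fail with -ENOTTY.
 */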
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2281 | long kvm_arch_vm_ioctl(struct file *filp, |
| 2282 | unsigned int ioctl, unsigned long arg) |
| 2283 | { |
Scott Wood | 5df554ad | 2013-04-12 14:08:46 +0000 | [diff] [blame] | 2284 | struct kvm *kvm __maybe_unused = filp->private_data; |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2285 | void __user *argp = (void __user *)arg; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2286 | long r; |
| 2287 | |
| 2288 | switch (ioctl) { |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2289 | case KVM_PPC_GET_PVINFO: { |
| 2290 | struct kvm_ppc_pvinfo pvinfo; |
Vasiliy Kulikov | d8cdddc | 2010-10-30 13:04:24 +0400 | [diff] [blame] | 2291 | memset(&pvinfo, 0, sizeof(pvinfo)); |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2292 | r = kvm_vm_ioctl_get_pvinfo(&pvinfo); |
| 2293 | if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) { |
| 2294 | r = -EFAULT; |
| 2295 | goto out; |
| 2296 | } |
| 2297 | |
| 2298 | break; |
| 2299 | } |
Paul Mackerras | 76d837a | 2017-05-11 14:31:59 +1000 | [diff] [blame] | 2300 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
Alexey Kardashevskiy | 58ded42 | 2016-03-01 17:54:40 +1100 | [diff] [blame] | 2301 | case KVM_CREATE_SPAPR_TCE_64: { |
| 2302 | struct kvm_create_spapr_tce_64 create_tce_64; |
| 2303 | |
| 2304 | r = -EFAULT; |
| 2305 | if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64))) |
| 2306 | goto out; |
| 2307 | if (create_tce_64.flags) { |
| 2308 | r = -EINVAL; |
| 2309 | goto out; |
| 2310 | } |
| 2311 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); |
| 2312 | goto out; |
| 2313 | } |
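/* Legacy 32-bit variant: translated into a kvm_create_spapr_tce_64 request. */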
David Gibson | 54738c0 | 2011-06-29 00:22:41 +0000 | [diff] [blame] | 2314 | case KVM_CREATE_SPAPR_TCE: { |
| 2315 | struct kvm_create_spapr_tce create_tce; |
Alexey Kardashevskiy | 58ded42 | 2016-03-01 17:54:40 +1100 | [diff] [blame] | 2316 | struct kvm_create_spapr_tce_64 create_tce_64; |
David Gibson | 54738c0 | 2011-06-29 00:22:41 +0000 | [diff] [blame] | 2317 | |
| 2318 | r = -EFAULT; |
| 2319 | if (copy_from_user(&create_tce, argp, sizeof(create_tce))) |
| 2320 | goto out; |
Alexey Kardashevskiy | 58ded42 | 2016-03-01 17:54:40 +1100 | [diff] [blame] | 2321 | |
| 2322 | create_tce_64.liobn = create_tce.liobn; |
| 2323 | create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K; |
| 2324 | create_tce_64.offset = 0; |
| 2325 | create_tce_64.size = create_tce.window_size >> |
| 2326 | IOMMU_PAGE_SHIFT_4K; |
| 2327 | create_tce_64.flags = 0; |
| 2328 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); |
David Gibson | 54738c0 | 2011-06-29 00:22:41 +0000 | [diff] [blame] | 2329 | goto out; |
| 2330 | } |
Paul Mackerras | 76d837a | 2017-05-11 14:31:59 +1000 | [diff] [blame] | 2331 | #endif |
| 2332 | #ifdef CONFIG_PPC_BOOK3S_64 |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 2333 | case KVM_PPC_GET_SMMU_INFO: { |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 2334 | struct kvm_ppc_smmu_info info; |
Aneesh Kumar K.V | cbbc58d | 2013-10-07 22:18:01 +0530 | [diff] [blame] | 2335 | struct kvm *kvm = filp->private_data; |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 2336 | |
| 2337 | memset(&info, 0, sizeof(info)); |
Aneesh Kumar K.V | cbbc58d | 2013-10-07 22:18:01 +0530 | [diff] [blame] | 2338 | r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 2339 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) |
| 2340 | r = -EFAULT; |
| 2341 | break; |
| 2342 | } |
Michael Ellerman | 8e591cb | 2013-04-17 20:30:00 +0000 | [diff] [blame] | 2343 | case KVM_PPC_RTAS_DEFINE_TOKEN: { |
| 2344 | struct kvm *kvm = filp->private_data; |
| 2345 | |
| 2346 | r = kvm_vm_ioctl_rtas_define_token(kvm, argp); |
| 2347 | break; |
| 2348 | } |
Paul Mackerras | c927013 | 2017-01-30 21:21:41 +1100 | [diff] [blame] | 2349 | case KVM_PPC_CONFIGURE_V3_MMU: { |
| 2350 | struct kvm *kvm = filp->private_data; |
| 2351 | struct kvm_ppc_mmuv3_cfg cfg; |
| 2352 | |
| 2353 | r = -EINVAL; |
| 2354 | if (!kvm->arch.kvm_ops->configure_mmu) |
| 2355 | goto out; |
| 2356 | r = -EFAULT; |
| 2357 | if (copy_from_user(&cfg, argp, sizeof(cfg))) |
| 2358 | goto out; |
| 2359 | r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg); |
| 2360 | break; |
| 2361 | } |
| 2362 | case KVM_PPC_GET_RMMU_INFO: { |
| 2363 | struct kvm *kvm = filp->private_data; |
| 2364 | struct kvm_ppc_rmmu_info info; |
| 2365 | |
| 2366 | r = -EINVAL; |
| 2367 | if (!kvm->arch.kvm_ops->get_rmmu_info) |
| 2368 | goto out; |
| 2369 | r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info); |
| 2370 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) |
| 2371 | r = -EFAULT; |
| 2372 | break; |
| 2373 | } |
Paul Mackerras | 3214d01 | 2018-01-15 16:06:47 +1100 | [diff] [blame] | 2374 | case KVM_PPC_GET_CPU_CHAR: { |
| 2375 | struct kvm_ppc_cpu_char cpuchar; |
| 2376 | |
| 2377 | r = kvmppc_get_cpu_char(&cpuchar); |
| 2378 | if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar))) |
| 2379 | r = -EFAULT; |
| 2380 | break; |
| 2381 | } |
Aneesh Kumar K.V | cbbc58d | 2013-10-07 22:18:01 +0530 | [diff] [blame] | 2382 | default: { |
| 2383 | struct kvm *kvm = filp->private_data; |
| 2384 | r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); |
| 2385 | } |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2386 | #else /* CONFIG_PPC_BOOK3S_64 */ |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2387 | default: |
Avi Kivity | 367e131 | 2009-08-26 14:57:07 +0300 | [diff] [blame] | 2388 | r = -ENOTTY; |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2389 | #endif |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2390 | } |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2391 | out: |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2392 | return r; |
| 2393 | } |
| 2394 | |
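/*
 * Simple global bitmap allocator for LPIDs (logical partition IDs).
 * kvmppc_init_lpid() records how many LPIDs the platform supports;
 * kvmppc_alloc_lpid() is lock-free and simply retries if it races on
 * test_and_set_bit().
 */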
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2395 | static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)]; |
| 2396 | static unsigned long nr_lpids; |
| 2397 | |
| 2398 | long kvmppc_alloc_lpid(void) |
| 2399 | { |
| 2400 | long lpid; |
| 2401 | |
| 2402 | do { |
| 2403 | lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS); |
| 2404 | if (lpid >= nr_lpids) { |
| 2405 | pr_err("%s: No LPIDs free\n", __func__); |
| 2406 | return -ENOMEM; |
| 2407 | } |
| 2408 | } while (test_and_set_bit(lpid, lpid_inuse)); |
| 2409 | |
| 2410 | return lpid; |
| 2411 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2412 | EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid); |
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2413 | |
| 2414 | void kvmppc_claim_lpid(long lpid) |
| 2415 | { |
| 2416 | set_bit(lpid, lpid_inuse); |
| 2417 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2418 | EXPORT_SYMBOL_GPL(kvmppc_claim_lpid); |
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2419 | |
| 2420 | void kvmppc_free_lpid(long lpid) |
| 2421 | { |
| 2422 | clear_bit(lpid, lpid_inuse); |
| 2423 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2424 | EXPORT_SYMBOL_GPL(kvmppc_free_lpid); |
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2425 | |
| 2426 | void kvmppc_init_lpid(unsigned long nr_lpids_param) |
| 2427 | { |
| 2428 | nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); |
| 2429 | memset(lpid_inuse, 0, sizeof(lpid_inuse)); |
| 2430 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2431 | EXPORT_SYMBOL_GPL(kvmppc_init_lpid); |
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2432 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2433 | int kvm_arch_init(void *opaque) |
| 2434 | { |
| 2435 | return 0; |
| 2436 | } |
| 2437 | |
Paolo Bonzini | 478d6686 | 2014-08-05 11:29:07 +0200 | [diff] [blame] | 2438 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr); |