// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
#include <asm/kvm_host.h>

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
Alexander Graf | 03d25c5 | 2012-08-10 12:28:50 +0200 | [diff] [blame] | 115 | /* Make sure we process requests preemptable */ |
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
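/*
 * The shared page is kept in the guest's byte order.  When the guest
 * switches endianness (tracked via vcpu->arch.shared_big_endian below),
 * every field has to be byte-swapped so the values already stored there
 * stay valid for both sides.
 */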
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

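/*
 * Handle a KVM paravirtual hypercall from the guest.  The hypercall
 * number arrives in GPR 11 and up to four parameters in GPRs 3-6
 * (truncated to 32 bits when the guest is in 32-bit mode).  The status
 * code is returned to the caller and a second return value is placed
 * in GPR 4.
 */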
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

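/*
 * kvmppc_st()/kvmppc_ld() copy data between the host buffer at 'ptr' and
 * the guest effective address *eaddr.  They first try the sub-arch
 * load/store hooks, then translate the address themselves, honouring the
 * magic (shared) page override, and fall back to MMIO emulation when the
 * guest memory cannot be accessed directly.
 */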
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

int kvm_arch_check_processor_compat(void)
{
	return kvmppc_core_check_processor_compat();
}

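/*
 * Called for KVM_CREATE_VM.  The 'type' argument from userspace selects
 * which implementation backs the new VM: 0 picks HV if available and PR
 * otherwise, while KVM_VM_PPC_HV and KVM_VM_PPC_PR request one explicitly.
 */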
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

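/*
 * Report which optional capabilities (KVM_CAP_*) this host supports.
 * 'kvm' may be NULL when the extension is queried system-wide rather than
 * on a specific VM, in which case HV support has to be guessed from
 * whether the HV module is loaded.
 */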
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
		/* fall through */
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
		       cpu_has_feature(CPU_FTR_HVMODE));
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

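/*
 * Architecture-level vcpu setup: initialise the decrementer hrtimer and
 * exit-timing state, run the sub-arch init, then hand off to the
 * HV/PR-specific vcpu creation and create the debugfs entries.
 */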
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	err = kvmppc_subarch_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvmppc_core_vcpu_create(vcpu);
	if (err)
		goto out_vcpu_uninit;

	vcpu->arch.wqp = &vcpu->wq;
	kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
	return 0;

out_vcpu_uninit:
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);

	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
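/*
 * These helpers convert a VSX doubleword/word element number into an
 * index into the host-side register image; the ordering is flipped on
 * little-endian hosts, and -1 signals an invalid element number.
 */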
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
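/*
 * Map a VMX element index of the given size onto its offset within the
 * vector register, reversing the order when the guest's byte order
 * differs from the host's (kvmppc_need_byteswap()); -1 signals an
 * out-of-range index.
 */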
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 970 | static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu, |
| 971 | int index, int element_size) |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 972 | { |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 973 | int offset; |
| 974 | int elts = sizeof(vector128)/element_size; |
| 975 | |
| 976 | if ((index < 0) || (index >= elts)) |
| 977 | return -1; |
| 978 | |
| 979 | if (kvmppc_need_byteswap(vcpu)) |
| 980 | offset = elts - index - 1; |
| 981 | else |
| 982 | offset = index; |
| 983 | |
| 984 | return offset; |
| 985 | } |
| 986 | |
| 987 | static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu, |
| 988 | int index) |
| 989 | { |
| 990 | return kvmppc_get_vmx_offset_generic(vcpu, index, 8); |
| 991 | } |
| 992 | |
| 993 | static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu, |
| 994 | int index) |
| 995 | { |
| 996 | return kvmppc_get_vmx_offset_generic(vcpu, index, 4); |
| 997 | } |
| 998 | |
| 999 | static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu, |
| 1000 | int index) |
| 1001 | { |
| 1002 | return kvmppc_get_vmx_offset_generic(vcpu, index, 2); |
| 1003 | } |
| 1004 | |
| 1005 | static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu, |
| 1006 | int index) |
| 1007 | { |
| 1008 | return kvmppc_get_vmx_offset_generic(vcpu, index, 1); |
| 1009 | } |
| 1010 | |
| 1011 | |
| 1012 | static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, |
| 1013 | u64 gpr) |
| 1014 | { |
| 1015 | union kvmppc_one_reg val; |
| 1016 | int offset = kvmppc_get_vmx_dword_offset(vcpu, |
| 1017 | vcpu->arch.mmio_vmx_offset); |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1018 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1019 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1020 | if (offset == -1) |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1021 | return; |
| 1022 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1023 | val.vval = VCPU_VSX_VR(vcpu, index); |
| 1024 | val.vsxval[offset] = gpr; |
| 1025 | VCPU_VSX_VR(vcpu, index) = val.vval; |
| 1026 | } |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1027 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1028 | static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, |
| 1029 | u32 gpr32) |
| 1030 | { |
| 1031 | union kvmppc_one_reg val; |
| 1032 | int offset = kvmppc_get_vmx_word_offset(vcpu, |
| 1033 | vcpu->arch.mmio_vmx_offset); |
| 1034 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; |
| 1035 | |
| 1036 | if (offset == -1) |
| 1037 | return; |
| 1038 | |
| 1039 | val.vval = VCPU_VSX_VR(vcpu, index); |
| 1040 | val.vsx32val[offset] = gpr32; |
| 1041 | VCPU_VSX_VR(vcpu, index) = val.vval; |
| 1042 | } |
| 1043 | |
| 1044 | static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, |
| 1045 | u16 gpr16) |
| 1046 | { |
| 1047 | union kvmppc_one_reg val; |
| 1048 | int offset = kvmppc_get_vmx_hword_offset(vcpu, |
| 1049 | vcpu->arch.mmio_vmx_offset); |
| 1050 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; |
| 1051 | |
| 1052 | if (offset == -1) |
| 1053 | return; |
| 1054 | |
| 1055 | val.vval = VCPU_VSX_VR(vcpu, index); |
| 1056 | val.vsx16val[offset] = gpr16; |
| 1057 | VCPU_VSX_VR(vcpu, index) = val.vval; |
| 1058 | } |
| 1059 | |
| 1060 | static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, |
| 1061 | u8 gpr8) |
| 1062 | { |
| 1063 | union kvmppc_one_reg val; |
| 1064 | int offset = kvmppc_get_vmx_byte_offset(vcpu, |
| 1065 | vcpu->arch.mmio_vmx_offset); |
| 1066 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; |
| 1067 | |
| 1068 | if (offset == -1) |
| 1069 | return; |
| 1070 | |
| 1071 | val.vval = VCPU_VSX_VR(vcpu, index); |
| 1072 | val.vsx8val[offset] = gpr8; |
| 1073 | VCPU_VSX_VR(vcpu, index) = val.vval; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1074 | } |
| 1075 | #endif /* CONFIG_ALTIVEC */ |
| 1076 | |
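/*
 * Single <-> double precision conversion helpers for FP MMIO emulation
 * (e.g. lfs/stfs).  They borrow the host FPU (fr0) under preempt_disable()
 * so the format conversion is done in hardware.
 */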
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1077 | #ifdef CONFIG_PPC_FPU |
| 1078 | static inline u64 sp_to_dp(u32 fprs) |
| 1079 | { |
| 1080 | u64 fprd; |
| 1081 | |
| 1082 | preempt_disable(); |
| 1083 | enable_kernel_fp(); |
| 1084 | asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs) |
| 1085 | : "fr0"); |
| 1086 | preempt_enable(); |
| 1087 | return fprd; |
| 1088 | } |
| 1089 | |
| 1090 | static inline u32 dp_to_sp(u64 fprd) |
| 1091 | { |
| 1092 | u32 fprs; |
| 1093 | |
| 1094 | preempt_disable(); |
| 1095 | enable_kernel_fp(); |
| 1096 | asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd) |
| 1097 | : "fr0"); |
| 1098 | preempt_enable(); |
| 1099 | return fprs; |
| 1100 | } |
| 1101 | |
| 1102 | #else |
| 1103 | #define sp_to_dp(x) (x) |
| 1104 | #define dp_to_sp(x) (x) |
| 1105 | #endif /* CONFIG_PPC_FPU */ |
| 1106 | |
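/*
 * Complete a pending MMIO load: copy the data that userspace (or the
 * in-kernel io bus) placed in run->mmio.data into the destination register,
 * applying byte-swapping, single-to-double conversion and sign extension as
 * requested, then dispatch on the register type (GPR/FPR/QPR/VSX/VMX/nested).
 */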
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1107 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, |
| 1108 | struct kvm_run *run) |
| 1109 | { |
Denis Kirjanov | 69b6183 | 2010-06-11 11:23:26 +0000 | [diff] [blame] | 1110 | u64 uninitialized_var(gpr); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1111 | |
Alexander Graf | 8e5b26b | 2010-01-08 02:58:01 +0100 | [diff] [blame] | 1112 | if (run->mmio.len > sizeof(gpr)) { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1113 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); |
| 1114 | return; |
| 1115 | } |
| 1116 | |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1117 | if (!vcpu->arch.mmio_host_swabbed) { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1118 | switch (run->mmio.len) { |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1119 | case 8: gpr = *(u64 *)run->mmio.data; break; |
Alexander Graf | 8e5b26b | 2010-01-08 02:58:01 +0100 | [diff] [blame] | 1120 | case 4: gpr = *(u32 *)run->mmio.data; break; |
| 1121 | case 2: gpr = *(u16 *)run->mmio.data; break; |
| 1122 | case 1: gpr = *(u8 *)run->mmio.data; break; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1123 | } |
| 1124 | } else { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1125 | switch (run->mmio.len) { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1126 | case 8: gpr = swab64(*(u64 *)run->mmio.data); break; |
| 1127 | case 4: gpr = swab32(*(u32 *)run->mmio.data); break; |
| 1128 | case 2: gpr = swab16(*(u16 *)run->mmio.data); break; |
Alexander Graf | 8e5b26b | 2010-01-08 02:58:01 +0100 | [diff] [blame] | 1129 | case 1: gpr = *(u8 *)run->mmio.data; break; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1130 | } |
| 1131 | } |
Alexander Graf | 8e5b26b | 2010-01-08 02:58:01 +0100 | [diff] [blame] | 1132 | |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1133 | /* conversion between single and double precision */ |
| 1134 | if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) |
| 1135 | gpr = sp_to_dp(gpr); |
| 1136 | |
Alexander Graf | 3587d53 | 2010-02-19 11:00:30 +0100 | [diff] [blame] | 1137 | if (vcpu->arch.mmio_sign_extend) { |
| 1138 | switch (run->mmio.len) { |
| 1139 | #ifdef CONFIG_PPC64 |
| 1140 | case 4: |
| 1141 | gpr = (s64)(s32)gpr; |
| 1142 | break; |
| 1143 | #endif |
| 1144 | case 2: |
| 1145 | gpr = (s64)(s16)gpr; |
| 1146 | break; |
| 1147 | case 1: |
| 1148 | gpr = (s64)(s8)gpr; |
| 1149 | break; |
| 1150 | } |
| 1151 | } |
| 1152 | |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1153 | switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { |
| 1154 | case KVM_MMIO_REG_GPR: |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1155 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); |
| 1156 | break; |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1157 | case KVM_MMIO_REG_FPR: |
Simon Guo | 2e6baa4 | 2018-05-21 13:24:22 +0800 | [diff] [blame] | 1158 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
| 1159 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); |
| 1160 | |
Paul Mackerras | efff191 | 2013-10-15 20:43:02 +1100 | [diff] [blame] | 1161 | VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1162 | break; |
Alexander Graf | 287d561 | 2010-04-01 15:33:21 +0200 | [diff] [blame] | 1163 | #ifdef CONFIG_PPC_BOOK3S |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1164 | case KVM_MMIO_REG_QPR: |
| 1165 | vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1166 | break; |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1167 | case KVM_MMIO_REG_FQPR: |
Paul Mackerras | efff191 | 2013-10-15 20:43:02 +1100 | [diff] [blame] | 1168 | VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1169 | vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1170 | break; |
Alexander Graf | 287d561 | 2010-04-01 15:33:21 +0200 | [diff] [blame] | 1171 | #endif |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1172 | #ifdef CONFIG_VSX |
| 1173 | case KVM_MMIO_REG_VSX: |
Simon Guo | 2e6baa4 | 2018-05-21 13:24:22 +0800 | [diff] [blame] | 1174 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
| 1175 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); |
| 1176 | |
Simon Guo | da2a32b | 2018-05-21 13:24:25 +0800 | [diff] [blame] | 1177 | if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1178 | kvmppc_set_vsr_dword(vcpu, gpr); |
Simon Guo | da2a32b | 2018-05-21 13:24:25 +0800 | [diff] [blame] | 1179 | else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1180 | kvmppc_set_vsr_word(vcpu, gpr); |
Simon Guo | da2a32b | 2018-05-21 13:24:25 +0800 | [diff] [blame] | 1181 | else if (vcpu->arch.mmio_copy_type == |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1182 | KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) |
| 1183 | kvmppc_set_vsr_dword_dump(vcpu, gpr); |
Simon Guo | da2a32b | 2018-05-21 13:24:25 +0800 | [diff] [blame] | 1184 | else if (vcpu->arch.mmio_copy_type == |
Simon Guo | 94dd7fa | 2018-05-21 13:24:20 +0800 | [diff] [blame] | 1185 | KVMPPC_VSX_COPY_WORD_LOAD_DUMP) |
| 1186 | kvmppc_set_vsr_word_dump(vcpu, gpr); |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1187 | break; |
| 1188 | #endif |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1189 | #ifdef CONFIG_ALTIVEC |
| 1190 | case KVM_MMIO_REG_VMX: |
Simon Guo | 2e6baa4 | 2018-05-21 13:24:22 +0800 | [diff] [blame] | 1191 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
| 1192 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); |
| 1193 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1194 | if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) |
| 1195 | kvmppc_set_vmx_dword(vcpu, gpr); |
| 1196 | else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) |
| 1197 | kvmppc_set_vmx_word(vcpu, gpr); |
| 1198 | else if (vcpu->arch.mmio_copy_type == |
| 1199 | KVMPPC_VMX_COPY_HWORD) |
| 1200 | kvmppc_set_vmx_hword(vcpu, gpr); |
| 1201 | else if (vcpu->arch.mmio_copy_type == |
| 1202 | KVMPPC_VMX_COPY_BYTE) |
| 1203 | kvmppc_set_vmx_byte(vcpu, gpr); |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1204 | break; |
| 1205 | #endif |
Suraj Jitindar Singh | 873db2c | 2018-12-14 16:29:08 +1100 | [diff] [blame] | 1206 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
| 1207 | case KVM_MMIO_REG_NESTED_GPR: |
| 1208 | if (kvmppc_need_byteswap(vcpu)) |
| 1209 | gpr = swab64(gpr); |
| 1210 | kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, |
| 1211 | sizeof(gpr)); |
| 1212 | break; |
| 1213 | #endif |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1214 | default: |
| 1215 | BUG(); |
| 1216 | } |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1217 | } |
| 1218 | |
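/*
 * Common MMIO load path: describe the access in vcpu->run->mmio, try the
 * in-kernel MMIO bus first, and otherwise return EMULATE_DO_MMIO so the
 * caller can exit to userspace to have the load performed there.
 */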
Paul Mackerras | eb8b056 | 2016-05-05 16:17:10 +1000 | [diff] [blame] | 1219 | static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1220 | unsigned int rt, unsigned int bytes, |
| 1221 | int is_default_endian, int sign_extend) |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1222 | { |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1223 | int idx, ret; |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1224 | bool host_swabbed; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1225 | |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1226 | /* Pity C doesn't have a logical XOR operator */ |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1227 | if (kvmppc_need_byteswap(vcpu)) { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1228 | host_swabbed = is_default_endian; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1229 | } else { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1230 | host_swabbed = !is_default_endian; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1231 | } |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1232 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1233 | if (bytes > sizeof(run->mmio.data)) { |
| 1234 | printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, |
 | 1235 | bytes);
| 1236 | } |
| 1237 | |
| 1238 | run->mmio.phys_addr = vcpu->arch.paddr_accessed; |
| 1239 | run->mmio.len = bytes; |
| 1240 | run->mmio.is_write = 0; |
| 1241 | |
| 1242 | vcpu->arch.io_gpr = rt; |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1243 | vcpu->arch.mmio_host_swabbed = host_swabbed; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1244 | vcpu->mmio_needed = 1; |
| 1245 | vcpu->mmio_is_write = 0; |
Paul Mackerras | eb8b056 | 2016-05-05 16:17:10 +1000 | [diff] [blame] | 1246 | vcpu->arch.mmio_sign_extend = sign_extend; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1247 | |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1248 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 1249 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 1250 | ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1251 | bytes, &run->mmio.data); |
| 1252 | |
| 1253 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 1254 | |
| 1255 | if (!ret) { |
Alexander Graf | 0e673fb | 2012-10-09 00:06:20 +0200 | [diff] [blame] | 1256 | kvmppc_complete_mmio_load(vcpu, run); |
| 1257 | vcpu->mmio_needed = 0; |
| 1258 | return EMULATE_DONE; |
| 1259 | } |
| 1260 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1261 | return EMULATE_DO_MMIO; |
| 1262 | } |
Paul Mackerras | eb8b056 | 2016-05-05 16:17:10 +1000 | [diff] [blame] | 1263 | |
| 1264 | int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1265 | unsigned int rt, unsigned int bytes, |
| 1266 | int is_default_endian) |
| 1267 | { |
| 1268 | return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0); |
| 1269 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 1270 | EXPORT_SYMBOL_GPL(kvmppc_handle_load); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1271 | |
Alexander Graf | 3587d53 | 2010-02-19 11:00:30 +0100 | [diff] [blame] | 1272 | /* Same as above, but sign extends */ |
| 1273 | int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1274 | unsigned int rt, unsigned int bytes, |
| 1275 | int is_default_endian) |
Alexander Graf | 3587d53 | 2010-02-19 11:00:30 +0100 | [diff] [blame] | 1276 | { |
Paul Mackerras | eb8b056 | 2016-05-05 16:17:10 +1000 | [diff] [blame] | 1277 | return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1); |
Alexander Graf | 3587d53 | 2010-02-19 11:00:30 +0100 | [diff] [blame] | 1278 | } |
| 1279 | |
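/*
 * VSX loads may span several 64-bit or 32-bit elements; emulate them as a
 * series of ordinary MMIO loads, advancing paddr_accessed and
 * mmio_vsx_offset after each element.
 */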
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1280 | #ifdef CONFIG_VSX |
| 1281 | int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1282 | unsigned int rt, unsigned int bytes, |
| 1283 | int is_default_endian, int mmio_sign_extend) |
| 1284 | { |
| 1285 | enum emulation_result emulated = EMULATE_DONE; |
| 1286 | |
Paul Mackerras | 9aa6825 | 2017-11-20 19:56:27 +1100 | [diff] [blame] | 1287 | /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
| 1288 | if (vcpu->arch.mmio_vsx_copy_nums > 4) |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1289 | return EMULATE_FAIL; |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1290 | |
| 1291 | while (vcpu->arch.mmio_vsx_copy_nums) { |
| 1292 | emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, |
| 1293 | is_default_endian, mmio_sign_extend); |
| 1294 | |
| 1295 | if (emulated != EMULATE_DONE) |
| 1296 | break; |
| 1297 | |
| 1298 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1299 | |
| 1300 | vcpu->arch.mmio_vsx_copy_nums--; |
| 1301 | vcpu->arch.mmio_vsx_offset++; |
| 1302 | } |
| 1303 | return emulated; |
| 1304 | } |
| 1305 | #endif /* CONFIG_VSX */ |
| 1306 | |
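/*
 * Common MMIO store path: the value to be stored is byte-swapped and/or
 * converted to single precision as needed, copied into run->mmio.data, and
 * then either completed on the in-kernel MMIO bus or handed to userspace
 * via EMULATE_DO_MMIO.
 */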
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1307 | int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1308 | u64 val, unsigned int bytes, int is_default_endian) |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1309 | { |
| 1310 | void *data = run->mmio.data; |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1311 | int idx, ret; |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1312 | bool host_swabbed; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1313 | |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1314 | /* Pity C doesn't have a logical XOR operator */ |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1315 | if (kvmppc_need_byteswap(vcpu)) { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1316 | host_swabbed = is_default_endian; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1317 | } else { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1318 | host_swabbed = !is_default_endian; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1319 | } |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1320 | |
| 1321 | if (bytes > sizeof(run->mmio.data)) { |
| 1322 | printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, |
 | 1323 | bytes);
| 1324 | } |
| 1325 | |
| 1326 | run->mmio.phys_addr = vcpu->arch.paddr_accessed; |
| 1327 | run->mmio.len = bytes; |
| 1328 | run->mmio.is_write = 1; |
| 1329 | vcpu->mmio_needed = 1; |
| 1330 | vcpu->mmio_is_write = 1; |
| 1331 | |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1332 | if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) |
| 1333 | val = dp_to_sp(val); |
| 1334 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1335 | /* Store the value at the lowest bytes in 'data'. */ |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1336 | if (!host_swabbed) { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1337 | switch (bytes) { |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1338 | case 8: *(u64 *)data = val; break; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1339 | case 4: *(u32 *)data = val; break; |
| 1340 | case 2: *(u16 *)data = val; break; |
| 1341 | case 1: *(u8 *)data = val; break; |
| 1342 | } |
| 1343 | } else { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1344 | switch (bytes) { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1345 | case 8: *(u64 *)data = swab64(val); break; |
| 1346 | case 4: *(u32 *)data = swab32(val); break; |
| 1347 | case 2: *(u16 *)data = swab16(val); break; |
| 1348 | case 1: *(u8 *)data = val; break; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1349 | } |
| 1350 | } |
| 1351 | |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1352 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 1353 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 1354 | ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1355 | bytes, &run->mmio.data); |
| 1356 | |
| 1357 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 1358 | |
| 1359 | if (!ret) { |
Alexander Graf | 0e673fb | 2012-10-09 00:06:20 +0200 | [diff] [blame] | 1360 | vcpu->mmio_needed = 0; |
| 1361 | return EMULATE_DONE; |
| 1362 | } |
| 1363 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1364 | return EMULATE_DO_MMIO; |
| 1365 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 1366 | EXPORT_SYMBOL_GPL(kvmppc_handle_store); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1367 | |
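/*
 * Fetch the next element to be stored from the source VSX register,
 * according to the copy type and the current mmio_vsx_offset.  Returns -1
 * if the offset or copy type is invalid.
 */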
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1368 | #ifdef CONFIG_VSX |
| 1369 | static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) |
| 1370 | { |
| 1371 | u32 dword_offset, word_offset; |
| 1372 | union kvmppc_one_reg reg; |
| 1373 | int vsx_offset = 0; |
Simon Guo | da2a32b | 2018-05-21 13:24:25 +0800 | [diff] [blame] | 1374 | int copy_type = vcpu->arch.mmio_copy_type; |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1375 | int result = 0; |
| 1376 | |
| 1377 | switch (copy_type) { |
| 1378 | case KVMPPC_VSX_COPY_DWORD: |
| 1379 | vsx_offset = |
| 1380 | kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); |
| 1381 | |
| 1382 | if (vsx_offset == -1) { |
| 1383 | result = -1; |
| 1384 | break; |
| 1385 | } |
| 1386 | |
Simon Guo | 4eeb855 | 2018-05-28 09:48:26 +0800 | [diff] [blame] | 1387 | if (rs < 32) { |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1388 | *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset); |
| 1389 | } else { |
Simon Guo | 4eeb855 | 2018-05-28 09:48:26 +0800 | [diff] [blame] | 1390 | reg.vval = VCPU_VSX_VR(vcpu, rs - 32); |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1391 | *val = reg.vsxval[vsx_offset]; |
| 1392 | } |
| 1393 | break; |
| 1394 | |
| 1395 | case KVMPPC_VSX_COPY_WORD: |
| 1396 | vsx_offset = |
| 1397 | kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); |
| 1398 | |
| 1399 | if (vsx_offset == -1) { |
| 1400 | result = -1; |
| 1401 | break; |
| 1402 | } |
| 1403 | |
Simon Guo | 4eeb855 | 2018-05-28 09:48:26 +0800 | [diff] [blame] | 1404 | if (rs < 32) { |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1405 | dword_offset = vsx_offset / 2; |
| 1406 | word_offset = vsx_offset % 2; |
| 1407 | reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset); |
| 1408 | *val = reg.vsx32val[word_offset]; |
| 1409 | } else { |
Simon Guo | 4eeb855 | 2018-05-28 09:48:26 +0800 | [diff] [blame] | 1410 | reg.vval = VCPU_VSX_VR(vcpu, rs - 32); |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1411 | *val = reg.vsx32val[vsx_offset]; |
| 1412 | } |
| 1413 | break; |
| 1414 | |
| 1415 | default: |
| 1416 | result = -1; |
| 1417 | break; |
| 1418 | } |
| 1419 | |
| 1420 | return result; |
| 1421 | } |
| 1422 | |
| 1423 | int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1424 | int rs, unsigned int bytes, int is_default_endian) |
| 1425 | { |
| 1426 | u64 val; |
| 1427 | enum emulation_result emulated = EMULATE_DONE; |
| 1428 | |
| 1429 | vcpu->arch.io_gpr = rs; |
| 1430 | |
Paul Mackerras | 9aa6825 | 2017-11-20 19:56:27 +1100 | [diff] [blame] | 1431 | /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
| 1432 | if (vcpu->arch.mmio_vsx_copy_nums > 4) |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1433 | return EMULATE_FAIL; |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1434 | |
| 1435 | while (vcpu->arch.mmio_vsx_copy_nums) { |
| 1436 | if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) |
| 1437 | return EMULATE_FAIL; |
| 1438 | |
| 1439 | emulated = kvmppc_handle_store(run, vcpu, |
| 1440 | val, bytes, is_default_endian); |
| 1441 | |
| 1442 | if (emulated != EMULATE_DONE) |
| 1443 | break; |
| 1444 | |
| 1445 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1446 | |
| 1447 | vcpu->arch.mmio_vsx_copy_nums--; |
| 1448 | vcpu->arch.mmio_vsx_offset++; |
| 1449 | } |
| 1450 | |
| 1451 | return emulated; |
| 1452 | } |
| 1453 | |
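/*
 * Called from kvm_arch_vcpu_ioctl_run() when a multi-element VSX access
 * still has elements outstanding after the previous MMIO exit completed:
 * issue the next element and decide whether to resume the guest or exit
 * to the host again.
 */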
| 1454 | static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, |
| 1455 | struct kvm_run *run) |
| 1456 | { |
| 1457 | enum emulation_result emulated = EMULATE_FAIL; |
| 1458 | int r; |
| 1459 | |
| 1460 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1461 | |
| 1462 | if (!vcpu->mmio_is_write) { |
| 1463 | emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr, |
| 1464 | run->mmio.len, 1, vcpu->arch.mmio_sign_extend); |
| 1465 | } else { |
| 1466 | emulated = kvmppc_handle_vsx_store(run, vcpu, |
| 1467 | vcpu->arch.io_gpr, run->mmio.len, 1); |
| 1468 | } |
| 1469 | |
| 1470 | switch (emulated) { |
| 1471 | case EMULATE_DO_MMIO: |
| 1472 | run->exit_reason = KVM_EXIT_MMIO; |
| 1473 | r = RESUME_HOST; |
| 1474 | break; |
| 1475 | case EMULATE_FAIL: |
| 1476 | pr_info("KVM: MMIO emulation failed (VSX repeat)\n"); |
| 1477 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
| 1478 | run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; |
| 1479 | r = RESUME_HOST; |
| 1480 | break; |
| 1481 | default: |
| 1482 | r = RESUME_GUEST; |
| 1483 | break; |
| 1484 | } |
| 1485 | return r; |
| 1486 | } |
| 1487 | #endif /* CONFIG_VSX */ |
| 1488 | |
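/*
 * Altivec (VMX) MMIO emulation mirrors the VSX code above: an access is
 * split into at most two MMIO transactions (see the copy_nums check below),
 * with the element size selected by vcpu->arch.mmio_copy_type.
 */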
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1489 | #ifdef CONFIG_ALTIVEC |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1490 | int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1491 | unsigned int rt, unsigned int bytes, int is_default_endian) |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1492 | { |
Paul Mackerras | 6df3877 | 2018-02-13 15:45:21 +1100 | [diff] [blame] | 1493 | enum emulation_result emulated = EMULATE_DONE; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1494 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1495 | if (vcpu->arch.mmio_vmx_copy_nums > 2)
| 1496 | return EMULATE_FAIL; |
| 1497 | |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1498 | while (vcpu->arch.mmio_vmx_copy_nums) { |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1499 | emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1500 | is_default_endian, 0); |
| 1501 | |
| 1502 | if (emulated != EMULATE_DONE) |
| 1503 | break; |
| 1504 | |
| 1505 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1506 | vcpu->arch.mmio_vmx_copy_nums--; |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1507 | vcpu->arch.mmio_vmx_offset++; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1508 | } |
| 1509 | |
| 1510 | return emulated; |
| 1511 | } |
| 1512 | |
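/*
 * kvmppc_get_vmx_{dword,word,hword,byte}() read the element selected by
 * mmio_vmx_offset out of a vector register for an emulated MMIO store,
 * again honouring guest endianness through the offset helpers.
 */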
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1513 | int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1514 | { |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1515 | union kvmppc_one_reg reg; |
| 1516 | int vmx_offset = 0; |
| 1517 | int result = 0; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1518 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1519 | vmx_offset = |
| 1520 | kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); |
| 1521 | |
| 1522 | if (vmx_offset == -1) |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1523 | return -1; |
| 1524 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1525 | reg.vval = VCPU_VSX_VR(vcpu, index); |
| 1526 | *val = reg.vsxval[vmx_offset]; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1527 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1528 | return result; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1529 | } |
| 1530 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1531 | int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) |
| 1532 | { |
| 1533 | union kvmppc_one_reg reg; |
| 1534 | int vmx_offset = 0; |
| 1535 | int result = 0; |
| 1536 | |
| 1537 | vmx_offset = |
| 1538 | kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); |
| 1539 | |
| 1540 | if (vmx_offset == -1) |
| 1541 | return -1; |
| 1542 | |
| 1543 | reg.vval = VCPU_VSX_VR(vcpu, index); |
| 1544 | *val = reg.vsx32val[vmx_offset]; |
| 1545 | |
| 1546 | return result; |
| 1547 | } |
| 1548 | |
| 1549 | int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) |
| 1550 | { |
| 1551 | union kvmppc_one_reg reg; |
| 1552 | int vmx_offset = 0; |
| 1553 | int result = 0; |
| 1554 | |
| 1555 | vmx_offset = |
| 1556 | kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); |
| 1557 | |
| 1558 | if (vmx_offset == -1) |
| 1559 | return -1; |
| 1560 | |
| 1561 | reg.vval = VCPU_VSX_VR(vcpu, index); |
| 1562 | *val = reg.vsx16val[vmx_offset]; |
| 1563 | |
| 1564 | return result; |
| 1565 | } |
| 1566 | |
| 1567 | int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) |
| 1568 | { |
| 1569 | union kvmppc_one_reg reg; |
| 1570 | int vmx_offset = 0; |
| 1571 | int result = 0; |
| 1572 | |
| 1573 | vmx_offset = |
| 1574 | kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); |
| 1575 | |
| 1576 | if (vmx_offset == -1) |
| 1577 | return -1; |
| 1578 | |
| 1579 | reg.vval = VCPU_VSX_VR(vcpu, index); |
| 1580 | *val = reg.vsx8val[vmx_offset]; |
| 1581 | |
| 1582 | return result; |
| 1583 | } |
| 1584 | |
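/*
 * For VMX stores, pull the element to be written out of the source vector
 * register according to mmio_copy_type, then reuse the generic
 * kvmppc_handle_store() path for the actual MMIO transaction.
 */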
| 1585 | int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1586 | unsigned int rs, unsigned int bytes, int is_default_endian) |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1587 | { |
| 1588 | u64 val = 0; |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1589 | unsigned int index = rs & KVM_MMIO_REG_MASK; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1590 | enum emulation_result emulated = EMULATE_DONE; |
| 1591 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1592 | if (vcpu->arch.mmio_vmx_copy_nums > 2)
| 1593 | return EMULATE_FAIL; |
| 1594 | |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1595 | vcpu->arch.io_gpr = rs; |
| 1596 | |
| 1597 | while (vcpu->arch.mmio_vmx_copy_nums) { |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1598 | switch (vcpu->arch.mmio_copy_type) { |
| 1599 | case KVMPPC_VMX_COPY_DWORD: |
| 1600 | if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) |
| 1601 | return EMULATE_FAIL; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1602 | |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1603 | break; |
| 1604 | case KVMPPC_VMX_COPY_WORD: |
| 1605 | if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) |
| 1606 | return EMULATE_FAIL; |
| 1607 | break; |
| 1608 | case KVMPPC_VMX_COPY_HWORD: |
| 1609 | if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) |
| 1610 | return EMULATE_FAIL; |
| 1611 | break; |
| 1612 | case KVMPPC_VMX_COPY_BYTE: |
| 1613 | if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) |
| 1614 | return EMULATE_FAIL; |
| 1615 | break; |
| 1616 | default: |
| 1617 | return EMULATE_FAIL; |
| 1618 | } |
| 1619 | |
| 1620 | emulated = kvmppc_handle_store(run, vcpu, val, bytes, |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1621 | is_default_endian); |
| 1622 | if (emulated != EMULATE_DONE) |
| 1623 | break; |
| 1624 | |
| 1625 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1626 | vcpu->arch.mmio_vmx_copy_nums--; |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1627 | vcpu->arch.mmio_vmx_offset++; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1628 | } |
| 1629 | |
| 1630 | return emulated; |
| 1631 | } |
| 1632 | |
| 1633 | static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu, |
| 1634 | struct kvm_run *run) |
| 1635 | { |
| 1636 | enum emulation_result emulated = EMULATE_FAIL; |
| 1637 | int r; |
| 1638 | |
| 1639 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1640 | |
| 1641 | if (!vcpu->mmio_is_write) { |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1642 | emulated = kvmppc_handle_vmx_load(run, vcpu, |
| 1643 | vcpu->arch.io_gpr, run->mmio.len, 1); |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1644 | } else { |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1645 | emulated = kvmppc_handle_vmx_store(run, vcpu, |
| 1646 | vcpu->arch.io_gpr, run->mmio.len, 1); |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1647 | } |
| 1648 | |
| 1649 | switch (emulated) { |
| 1650 | case EMULATE_DO_MMIO: |
| 1651 | run->exit_reason = KVM_EXIT_MMIO; |
| 1652 | r = RESUME_HOST; |
| 1653 | break; |
| 1654 | case EMULATE_FAIL: |
| 1655 | pr_info("KVM: MMIO emulation failed (VMX repeat)\n"); |
| 1656 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
| 1657 | run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; |
| 1658 | r = RESUME_HOST; |
| 1659 | break; |
| 1660 | default: |
| 1661 | r = RESUME_GUEST; |
| 1662 | break; |
| 1663 | } |
| 1664 | return r; |
| 1665 | } |
| 1666 | #endif /* CONFIG_ALTIVEC */ |
| 1667 | |
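/*
 * ONE_REG accessors: a register is first offered to the backend via
 * kvmppc_get/set_one_reg(); the Altivec registers handled here are the
 * generic fallback shared by all implementations.
 */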
Mihai Caraman | 8a41ea5 | 2014-08-20 16:36:24 +0300 | [diff] [blame] | 1668 | int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) |
| 1669 | { |
| 1670 | int r = 0; |
| 1671 | union kvmppc_one_reg val; |
| 1672 | int size; |
| 1673 | |
| 1674 | size = one_reg_size(reg->id); |
| 1675 | if (size > sizeof(val)) |
| 1676 | return -EINVAL; |
| 1677 | |
| 1678 | r = kvmppc_get_one_reg(vcpu, reg->id, &val); |
| 1679 | if (r == -EINVAL) { |
| 1680 | r = 0; |
| 1681 | switch (reg->id) { |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1682 | #ifdef CONFIG_ALTIVEC |
| 1683 | case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: |
| 1684 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1685 | r = -ENXIO; |
| 1686 | break; |
| 1687 | } |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1688 | val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1689 | break; |
| 1690 | case KVM_REG_PPC_VSCR: |
| 1691 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1692 | r = -ENXIO; |
| 1693 | break; |
| 1694 | } |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1695 | val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1696 | break; |
| 1697 | case KVM_REG_PPC_VRSAVE: |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1698 | val = get_reg_val(reg->id, vcpu->arch.vrsave); |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1699 | break; |
| 1700 | #endif /* CONFIG_ALTIVEC */ |
Mihai Caraman | 8a41ea5 | 2014-08-20 16:36:24 +0300 | [diff] [blame] | 1701 | default: |
| 1702 | r = -EINVAL; |
| 1703 | break; |
| 1704 | } |
| 1705 | } |
| 1706 | |
| 1707 | if (r) |
| 1708 | return r; |
| 1709 | |
| 1710 | if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size)) |
| 1711 | r = -EFAULT; |
| 1712 | |
| 1713 | return r; |
| 1714 | } |
| 1715 | |
| 1716 | int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) |
| 1717 | { |
| 1718 | int r; |
| 1719 | union kvmppc_one_reg val; |
| 1720 | int size; |
| 1721 | |
| 1722 | size = one_reg_size(reg->id); |
| 1723 | if (size > sizeof(val)) |
| 1724 | return -EINVAL; |
| 1725 | |
| 1726 | if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size)) |
| 1727 | return -EFAULT; |
| 1728 | |
| 1729 | r = kvmppc_set_one_reg(vcpu, reg->id, &val); |
| 1730 | if (r == -EINVAL) { |
| 1731 | r = 0; |
| 1732 | switch (reg->id) { |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1733 | #ifdef CONFIG_ALTIVEC |
| 1734 | case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: |
| 1735 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1736 | r = -ENXIO; |
| 1737 | break; |
| 1738 | } |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1739 | vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1740 | break; |
| 1741 | case KVM_REG_PPC_VSCR: |
| 1742 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1743 | r = -ENXIO; |
| 1744 | break; |
| 1745 | } |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1746 | vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1747 | break; |
| 1748 | case KVM_REG_PPC_VRSAVE: |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1749 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1750 | r = -ENXIO; |
| 1751 | break; |
| 1752 | } |
| 1753 | vcpu->arch.vrsave = set_reg_val(reg->id, val); |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1754 | break; |
| 1755 | #endif /* CONFIG_ALTIVEC */ |
Mihai Caraman | 8a41ea5 | 2014-08-20 16:36:24 +0300 | [diff] [blame] | 1756 | default: |
| 1757 | r = -EINVAL; |
| 1758 | break; |
| 1759 | } |
| 1760 | } |
| 1761 | |
| 1762 | return r; |
| 1763 | } |
| 1764 | |
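/*
 * Main vcpu run ioctl.  Before entering the guest, finish off any state
 * that userspace was asked to provide on the previous exit: pending MMIO
 * loads (including further passes of multi-element VSX/VMX accesses), OSI
 * and PAPR hypercall return values, and EPR updates on BookE.
 */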
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1765 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) |
| 1766 | { |
| 1767 | int r; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1768 | |
Christoffer Dall | accb757 | 2017-12-04 21:35:25 +0100 | [diff] [blame] | 1769 | vcpu_load(vcpu); |
| 1770 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1771 | if (vcpu->mmio_needed) { |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1772 | vcpu->mmio_needed = 0; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1773 | if (!vcpu->mmio_is_write) |
| 1774 | kvmppc_complete_mmio_load(vcpu, run); |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1775 | #ifdef CONFIG_VSX |
| 1776 | if (vcpu->arch.mmio_vsx_copy_nums > 0) { |
| 1777 | vcpu->arch.mmio_vsx_copy_nums--; |
| 1778 | vcpu->arch.mmio_vsx_offset++; |
| 1779 | } |
| 1780 | |
| 1781 | if (vcpu->arch.mmio_vsx_copy_nums > 0) { |
| 1782 | r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run); |
| 1783 | if (r == RESUME_HOST) { |
| 1784 | vcpu->mmio_needed = 1; |
Christoffer Dall | accb757 | 2017-12-04 21:35:25 +0100 | [diff] [blame] | 1785 | goto out; |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1786 | } |
| 1787 | } |
| 1788 | #endif |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1789 | #ifdef CONFIG_ALTIVEC |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1790 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1791 | vcpu->arch.mmio_vmx_copy_nums--; |
Simon Guo | acc9eb9 | 2018-05-21 13:24:26 +0800 | [diff] [blame] | 1792 | vcpu->arch.mmio_vmx_offset++; |
| 1793 | } |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1794 | |
| 1795 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { |
| 1796 | r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); |
| 1797 | if (r == RESUME_HOST) { |
| 1798 | vcpu->mmio_needed = 1; |
Radim Krčmář | 1ab03c0 | 2018-02-09 21:36:57 +0100 | [diff] [blame] | 1799 | goto out; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1800 | } |
| 1801 | } |
| 1802 | #endif |
Alexander Graf | ad0a048 | 2010-03-24 21:48:30 +0100 | [diff] [blame] | 1803 | } else if (vcpu->arch.osi_needed) { |
| 1804 | u64 *gprs = run->osi.gprs; |
| 1805 | int i; |
| 1806 | |
| 1807 | for (i = 0; i < 32; i++) |
| 1808 | kvmppc_set_gpr(vcpu, i, gprs[i]); |
| 1809 | vcpu->arch.osi_needed = 0; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1810 | } else if (vcpu->arch.hcall_needed) { |
| 1811 | int i; |
| 1812 | |
| 1813 | kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); |
| 1814 | for (i = 0; i < 9; ++i) |
| 1815 | kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); |
| 1816 | vcpu->arch.hcall_needed = 0; |
Alexander Graf | 1c81063 | 2013-01-04 18:12:48 +0100 | [diff] [blame] | 1817 | #ifdef CONFIG_BOOKE |
| 1818 | } else if (vcpu->arch.epr_needed) { |
| 1819 | kvmppc_set_epr(vcpu, run->epr.epr); |
| 1820 | vcpu->arch.epr_needed = 0; |
| 1821 | #endif |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1822 | } |
| 1823 | |
Jan H. Schönherr | 20b7035 | 2017-11-24 22:39:01 +0100 | [diff] [blame] | 1824 | kvm_sigset_activate(vcpu); |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1825 | |
Paolo Bonzini | 460df4c | 2017-02-08 11:50:15 +0100 | [diff] [blame] | 1826 | if (run->immediate_exit) |
| 1827 | r = -EINTR; |
| 1828 | else |
| 1829 | r = kvmppc_vcpu_run(run, vcpu); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1830 | |
Jan H. Schönherr | 20b7035 | 2017-11-24 22:39:01 +0100 | [diff] [blame] | 1831 | kvm_sigset_deactivate(vcpu); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1832 | |
Paul Mackerras | c662f77 | 2018-02-13 15:16:01 +1100 | [diff] [blame] | 1833 | #ifdef CONFIG_ALTIVEC |
Christoffer Dall | accb757 | 2017-12-04 21:35:25 +0100 | [diff] [blame] | 1834 | out: |
Paul Mackerras | c662f77 | 2018-02-13 15:16:01 +1100 | [diff] [blame] | 1835 | #endif |
Christoffer Dall | accb757 | 2017-12-04 21:35:25 +0100 | [diff] [blame] | 1836 | vcpu_put(vcpu); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1837 | return r; |
| 1838 | } |
| 1839 | |
| 1840 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) |
| 1841 | { |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1842 | if (irq->irq == KVM_INTERRUPT_UNSET) { |
Paul Mackerras | 4fe27d2 | 2013-02-14 14:00:25 +0000 | [diff] [blame] | 1843 | kvmppc_core_dequeue_external(vcpu); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1844 | return 0; |
| 1845 | } |
Hollis Blanchard | 45c5eb6 | 2008-04-25 17:55:49 -0500 | [diff] [blame] | 1846 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1847 | kvmppc_core_queue_external(vcpu, irq); |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 1848 | |
Scott Wood | dfd4d47 | 2011-11-17 12:39:59 +0000 | [diff] [blame] | 1849 | kvm_vcpu_kick(vcpu); |
Hollis Blanchard | 45c5eb6 | 2008-04-25 17:55:49 -0500 | [diff] [blame] | 1850 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1851 | return 0; |
| 1852 | } |
| 1853 | |
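/*
 * Per-vcpu capability enablement.  Most capabilities just flip a flag in
 * vcpu->arch; the interrupt-controller ones (MPIC/XICS/XIVE) connect the
 * vcpu to an in-kernel irqchip device passed in as a file descriptor.
 */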
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 1854 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, |
| 1855 | struct kvm_enable_cap *cap) |
| 1856 | { |
| 1857 | int r; |
| 1858 | |
| 1859 | if (cap->flags) |
| 1860 | return -EINVAL; |
| 1861 | |
| 1862 | switch (cap->cap) { |
Alexander Graf | ad0a048 | 2010-03-24 21:48:30 +0100 | [diff] [blame] | 1863 | case KVM_CAP_PPC_OSI: |
| 1864 | r = 0; |
| 1865 | vcpu->arch.osi_enabled = true; |
| 1866 | break; |
Alexander Graf | 930b412 | 2011-08-08 17:29:42 +0200 | [diff] [blame] | 1867 | case KVM_CAP_PPC_PAPR: |
| 1868 | r = 0; |
| 1869 | vcpu->arch.papr_enabled = true; |
| 1870 | break; |
Alexander Graf | 1c81063 | 2013-01-04 18:12:48 +0100 | [diff] [blame] | 1871 | case KVM_CAP_PPC_EPR: |
| 1872 | r = 0; |
Scott Wood | 5df554ad | 2013-04-12 14:08:46 +0000 | [diff] [blame] | 1873 | if (cap->args[0]) |
| 1874 | vcpu->arch.epr_flags |= KVMPPC_EPR_USER; |
| 1875 | else |
| 1876 | vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; |
Alexander Graf | 1c81063 | 2013-01-04 18:12:48 +0100 | [diff] [blame] | 1877 | break; |
Bharat Bhushan | f61c94b | 2012-08-08 20:38:19 +0000 | [diff] [blame] | 1878 | #ifdef CONFIG_BOOKE |
| 1879 | case KVM_CAP_PPC_BOOKE_WATCHDOG: |
| 1880 | r = 0; |
| 1881 | vcpu->arch.watchdog_enabled = true; |
| 1882 | break; |
| 1883 | #endif |
Alexander Graf | bf7ca4b | 2012-02-15 23:40:00 +0000 | [diff] [blame] | 1884 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
Scott Wood | dc83b8b | 2011-08-18 15:25:21 -0500 | [diff] [blame] | 1885 | case KVM_CAP_SW_TLB: { |
| 1886 | struct kvm_config_tlb cfg; |
| 1887 | void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; |
| 1888 | |
| 1889 | r = -EFAULT; |
| 1890 | if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) |
| 1891 | break; |
| 1892 | |
| 1893 | r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); |
| 1894 | break; |
| 1895 | } |
| 1896 | #endif |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1897 | #ifdef CONFIG_KVM_MPIC |
| 1898 | case KVM_CAP_IRQ_MPIC: { |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1899 | struct fd f; |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1900 | struct kvm_device *dev; |
| 1901 | |
| 1902 | r = -EBADF; |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1903 | f = fdget(cap->args[0]); |
| 1904 | if (!f.file) |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1905 | break; |
| 1906 | |
| 1907 | r = -EPERM; |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1908 | dev = kvm_device_from_filp(f.file); |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1909 | if (dev) |
| 1910 | r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); |
| 1911 | |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1912 | fdput(f); |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1913 | break; |
| 1914 | } |
| 1915 | #endif |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1916 | #ifdef CONFIG_KVM_XICS |
| 1917 | case KVM_CAP_IRQ_XICS: { |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1918 | struct fd f; |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1919 | struct kvm_device *dev; |
| 1920 | |
| 1921 | r = -EBADF; |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1922 | f = fdget(cap->args[0]); |
| 1923 | if (!f.file) |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1924 | break; |
| 1925 | |
| 1926 | r = -EPERM; |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1927 | dev = kvm_device_from_filp(f.file); |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1928 | if (dev) { |
Paul Mackerras | 03f9533 | 2019-02-04 22:07:20 +1100 | [diff] [blame] | 1929 | if (xics_on_xive()) |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1930 | r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); |
| 1931 | else |
| 1932 | r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); |
| 1933 | } |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1934 | |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1935 | fdput(f); |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1936 | break; |
| 1937 | } |
| 1938 | #endif /* CONFIG_KVM_XICS */ |
Cédric Le Goater | eacc56b | 2019-04-18 12:39:28 +0200 | [diff] [blame] | 1939 | #ifdef CONFIG_KVM_XIVE |
| 1940 | case KVM_CAP_PPC_IRQ_XIVE: { |
| 1941 | struct fd f; |
| 1942 | struct kvm_device *dev; |
| 1943 | |
| 1944 | r = -EBADF; |
| 1945 | f = fdget(cap->args[0]); |
| 1946 | if (!f.file) |
| 1947 | break; |
| 1948 | |
| 1949 | r = -ENXIO; |
| 1950 | if (!xive_enabled()) |
| 1951 | break; |
| 1952 | |
| 1953 | r = -EPERM; |
| 1954 | dev = kvm_device_from_filp(f.file); |
| 1955 | if (dev) |
| 1956 | r = kvmppc_xive_native_connect_vcpu(dev, vcpu, |
| 1957 | cap->args[1]); |
| 1958 | |
| 1959 | fdput(f); |
| 1960 | break; |
| 1961 | } |
| 1962 | #endif /* CONFIG_KVM_XIVE */ |
Aravinda Prasad | 134764e | 2017-05-11 16:32:48 +0530 | [diff] [blame] | 1963 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
| 1964 | case KVM_CAP_PPC_FWNMI: |
| 1965 | r = -EINVAL; |
| 1966 | if (!is_kvmppc_hv_enabled(vcpu->kvm)) |
| 1967 | break; |
| 1968 | r = 0; |
| 1969 | vcpu->kvm->arch.fwnmi_enabled = true; |
| 1970 | break; |
| 1971 | #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 1972 | default: |
| 1973 | r = -EINVAL; |
| 1974 | break; |
| 1975 | } |
| 1976 | |
Alexander Graf | af8f38b | 2011-08-10 13:57:08 +0200 | [diff] [blame] | 1977 | if (!r) |
| 1978 | r = kvmppc_sanity_check(vcpu); |
| 1979 | |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 1980 | return r; |
| 1981 | } |
| 1982 | |
Paul Mackerras | 34a75b0 | 2016-08-10 11:27:27 +1000 | [diff] [blame] | 1983 | bool kvm_arch_intc_initialized(struct kvm *kvm) |
| 1984 | { |
| 1985 | #ifdef CONFIG_KVM_MPIC |
| 1986 | if (kvm->arch.mpic) |
| 1987 | return true; |
| 1988 | #endif |
| 1989 | #ifdef CONFIG_KVM_XICS |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1990 | if (kvm->arch.xics || kvm->arch.xive) |
Paul Mackerras | 34a75b0 | 2016-08-10 11:27:27 +1000 | [diff] [blame] | 1991 | return true; |
| 1992 | #endif |
| 1993 | return false; |
| 1994 | } |
| 1995 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1996 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
| 1997 | struct kvm_mp_state *mp_state) |
| 1998 | { |
| 1999 | return -EINVAL; |
| 2000 | } |
| 2001 | |
| 2002 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
| 2003 | struct kvm_mp_state *mp_state) |
| 2004 | { |
| 2005 | return -EINVAL; |
| 2006 | } |
| 2007 | |
Paolo Bonzini | 5cb0944 | 2017-12-12 17:41:34 +0100 | [diff] [blame] | 2008 | long kvm_arch_vcpu_async_ioctl(struct file *filp, |
| 2009 | unsigned int ioctl, unsigned long arg) |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2010 | { |
| 2011 | struct kvm_vcpu *vcpu = filp->private_data; |
| 2012 | void __user *argp = (void __user *)arg; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2013 | |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 2014 | if (ioctl == KVM_INTERRUPT) { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2015 | struct kvm_interrupt irq; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2016 | if (copy_from_user(&irq, argp, sizeof(irq))) |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 2017 | return -EFAULT; |
| 2018 | return kvm_vcpu_ioctl_interrupt(vcpu, &irq); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2019 | } |
Paolo Bonzini | 5cb0944 | 2017-12-12 17:41:34 +0100 | [diff] [blame] | 2020 | return -ENOIOCTLCMD; |
| 2021 | } |
| 2022 | |
| 2023 | long kvm_arch_vcpu_ioctl(struct file *filp, |
| 2024 | unsigned int ioctl, unsigned long arg) |
| 2025 | { |
| 2026 | struct kvm_vcpu *vcpu = filp->private_data; |
| 2027 | void __user *argp = (void __user *)arg; |
| 2028 | long r; |
Avi Kivity | 19483d1 | 2010-05-13 12:30:43 +0300 | [diff] [blame] | 2029 | |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 2030 | switch (ioctl) { |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 2031 | case KVM_ENABLE_CAP: |
| 2032 | { |
| 2033 | struct kvm_enable_cap cap; |
| 2034 | r = -EFAULT; |
Simon Guo | b3cebfe | 2018-05-23 15:02:09 +0800 | [diff] [blame] | 2035 | vcpu_load(vcpu); |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 2036 | if (copy_from_user(&cap, argp, sizeof(cap))) |
| 2037 | goto out; |
| 2038 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); |
Simon Guo | b3cebfe | 2018-05-23 15:02:09 +0800 | [diff] [blame] | 2039 | vcpu_put(vcpu); |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 2040 | break; |
| 2041 | } |
Scott Wood | dc83b8b | 2011-08-18 15:25:21 -0500 | [diff] [blame] | 2042 | |
Alexander Graf | e24ed81 | 2011-09-14 10:02:41 +0200 | [diff] [blame] | 2043 | case KVM_SET_ONE_REG: |
| 2044 | case KVM_GET_ONE_REG: |
| 2045 | { |
| 2046 | struct kvm_one_reg reg; |
| 2047 | r = -EFAULT; |
| 2048 | if (copy_from_user(®, argp, sizeof(reg))) |
| 2049 | goto out; |
| 2050 | if (ioctl == KVM_SET_ONE_REG) |
| 2051 | r = kvm_vcpu_ioctl_set_one_reg(vcpu, ®); |
| 2052 | else |
| 2053 | r = kvm_vcpu_ioctl_get_one_reg(vcpu, ®); |
| 2054 | break; |
| 2055 | } |
| 2056 | |
Alexander Graf | bf7ca4b | 2012-02-15 23:40:00 +0000 | [diff] [blame] | 2057 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
Scott Wood | dc83b8b | 2011-08-18 15:25:21 -0500 | [diff] [blame] | 2058 | case KVM_DIRTY_TLB: { |
| 2059 | struct kvm_dirty_tlb dirty; |
| 2060 | r = -EFAULT; |
Simon Guo | b3cebfe | 2018-05-23 15:02:09 +0800 | [diff] [blame] | 2061 | vcpu_load(vcpu); |
Scott Wood | dc83b8b | 2011-08-18 15:25:21 -0500 | [diff] [blame] | 2062 | if (copy_from_user(&dirty, argp, sizeof(dirty))) |
| 2063 | goto out; |
| 2064 | r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); |
Simon Guo | b3cebfe | 2018-05-23 15:02:09 +0800 | [diff] [blame] | 2065 | vcpu_put(vcpu); |
Scott Wood | dc83b8b | 2011-08-18 15:25:21 -0500 | [diff] [blame] | 2066 | break; |
| 2067 | } |
| 2068 | #endif |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2069 | default: |
| 2070 | r = -EINVAL; |
| 2071 | } |
| 2072 | |
| 2073 | out: |
| 2074 | return r; |
| 2075 | } |
| 2076 | |
Souptick Joarder | 1499fa8 | 2018-04-19 00:49:58 +0530 | [diff] [blame] | 2077 | vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) |
Carsten Otte | 5b1c149 | 2012-01-04 10:25:23 +0100 | [diff] [blame] | 2078 | { |
| 2079 | return VM_FAULT_SIGBUS; |
| 2080 | } |
| 2081 | |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2082 | static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) |
| 2083 | { |
Stuart Yoder | 784bafa | 2012-07-03 05:48:51 +0000 | [diff] [blame] | 2084 | u32 inst_nop = 0x60000000; |
| 2085 | #ifdef CONFIG_KVM_BOOKE_HV |
| 2086 | u32 inst_sc1 = 0x44000022; |
Alexander Graf | 2743103 | 2014-04-24 13:39:16 +0200 | [diff] [blame] | 2087 | pvinfo->hcall[0] = cpu_to_be32(inst_sc1); |
| 2088 | pvinfo->hcall[1] = cpu_to_be32(inst_nop); |
| 2089 | pvinfo->hcall[2] = cpu_to_be32(inst_nop); |
| 2090 | pvinfo->hcall[3] = cpu_to_be32(inst_nop); |
Stuart Yoder | 784bafa | 2012-07-03 05:48:51 +0000 | [diff] [blame] | 2091 | #else |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2092 | u32 inst_lis = 0x3c000000; |
| 2093 | u32 inst_ori = 0x60000000; |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2094 | u32 inst_sc = 0x44000002; |
| 2095 | u32 inst_imm_mask = 0xffff; |
| 2096 | |
| 2097 | /* |
| 2098 | * The hypercall to get into KVM from within guest context is as |
| 2099 | * follows: |
| 2100 | * |
 | 2101 | * lis r0, KVM_SC_MAGIC_R0@h
 | 2102 | * ori r0, r0, KVM_SC_MAGIC_R0@l
| 2103 | * sc |
| 2104 | * nop |
| 2105 | */ |
Alexander Graf | 2743103 | 2014-04-24 13:39:16 +0200 | [diff] [blame] | 2106 | pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask)); |
| 2107 | pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask)); |
| 2108 | pvinfo->hcall[2] = cpu_to_be32(inst_sc); |
| 2109 | pvinfo->hcall[3] = cpu_to_be32(inst_nop); |
Stuart Yoder | 784bafa | 2012-07-03 05:48:51 +0000 | [diff] [blame] | 2110 | #endif |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2111 | |
Liu Yu-B13201 | 9202e07 | 2012-07-03 05:48:52 +0000 | [diff] [blame] | 2112 | pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; |
| 2113 | |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2114 | return 0; |
| 2115 | } |
| 2116 | |
Alexander Graf | 5efdb4b | 2013-04-17 00:37:57 +0200 | [diff] [blame] | 2117 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, |
| 2118 | bool line_status) |
| 2119 | { |
| 2120 | if (!irqchip_in_kernel(kvm)) |
| 2121 | return -ENXIO; |
| 2122 | |
| 2123 | irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, |
| 2124 | irq_event->irq, irq_event->level, |
| 2125 | line_status); |
| 2126 | return 0; |
| 2127 | } |
| 2128 | |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2129 | |
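/*
 * VM-wide capability enablement, e.g. selecting which hypercalls the guest
 * may use (KVM_CAP_PPC_ENABLE_HCALL), the SMT mode, or nested HV support.
 */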
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 2130 | int kvm_vm_ioctl_enable_cap(struct kvm *kvm, |
| 2131 | struct kvm_enable_cap *cap) |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2132 | { |
| 2133 | int r; |
| 2134 | |
| 2135 | if (cap->flags) |
| 2136 | return -EINVAL; |
| 2137 | |
| 2138 | switch (cap->cap) { |
| 2139 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER |
| 2140 | case KVM_CAP_PPC_ENABLE_HCALL: { |
| 2141 | unsigned long hcall = cap->args[0]; |
| 2142 | |
| 2143 | r = -EINVAL; |
| 2144 | if (hcall > MAX_HCALL_OPCODE || (hcall & 3) || |
| 2145 | cap->args[1] > 1) |
| 2146 | break; |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 2147 | if (!kvmppc_book3s_hcall_implemented(kvm, hcall)) |
| 2148 | break; |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2149 | if (cap->args[1]) |
| 2150 | set_bit(hcall / 4, kvm->arch.enabled_hcalls); |
| 2151 | else |
| 2152 | clear_bit(hcall / 4, kvm->arch.enabled_hcalls); |
| 2153 | r = 0; |
| 2154 | break; |
| 2155 | } |
Paul Mackerras | 3c31352 | 2017-02-06 13:24:41 +1100 | [diff] [blame] | 2156 | case KVM_CAP_PPC_SMT: { |
| 2157 | unsigned long mode = cap->args[0]; |
| 2158 | unsigned long flags = cap->args[1]; |
| 2159 | |
| 2160 | r = -EINVAL; |
| 2161 | if (kvm->arch.kvm_ops->set_smt_mode) |
| 2162 | r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); |
| 2163 | break; |
| 2164 | } |
Paul Mackerras | aa069a9 | 2018-09-21 20:02:01 +1000 | [diff] [blame] | 2165 | |
| 2166 | case KVM_CAP_PPC_NESTED_HV: |
| 2167 | r = -EINVAL; |
| 2168 | if (!is_kvmppc_hv_enabled(kvm) || |
| 2169 | !kvm->arch.kvm_ops->enable_nested) |
| 2170 | break; |
| 2171 | r = kvm->arch.kvm_ops->enable_nested(kvm); |
| 2172 | break; |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2173 | #endif |
| 2174 | default: |
| 2175 | r = -EINVAL; |
| 2176 | break; |
| 2177 | } |
| 2178 | |
| 2179 | return r; |
| 2180 | } |
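
A userspace sketch, not part of this file, of the KVM_CAP_PPC_ENABLE_HCALL path above: KVM_ENABLE_CAP is issued on the VM file descriptor with the hypercall token in args[0] and the enable flag in args[1]. vm_fd and hcall_token are assumptions for the example; the token must pass the same checks as above (a multiple of 4, no larger than MAX_HCALL_OPCODE, and implemented by the backend).

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_hcall_enabled(int vm_fd, unsigned long hcall_token, int enable)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_PPC_ENABLE_HCALL;
	cap.args[0] = hcall_token;	/* sPAPR hypercall opcode (token) */
	cap.args[1] = enable ? 1 : 0;	/* 1 = handle in kernel, 0 = forward to userspace */

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}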
| 2181 | |
Paul Mackerras | 3214d01 | 2018-01-15 16:06:47 +1100 | [diff] [blame] | 2182 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 2183 | /* |
| 2184 | * These functions check whether the underlying hardware is safe |
| 2185 | * against attacks based on observing the effects of speculatively |
| 2186 | * executed instructions, and whether it supplies instructions for |
| 2187 | * use in workarounds. The information comes from firmware, either |
| 2188 | * via the device tree on powernv platforms or from an hcall on |
| 2189 | * pseries platforms. |
| 2190 | */ |
| 2191 | #ifdef CONFIG_PPC_PSERIES |
| 2192 | static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp) |
| 2193 | { |
| 2194 | struct h_cpu_char_result c; |
| 2195 | unsigned long rc; |
| 2196 | |
| 2197 | if (!machine_is(pseries)) |
| 2198 | return -ENOTTY; |
| 2199 | |
| 2200 | rc = plpar_get_cpu_characteristics(&c); |
| 2201 | if (rc == H_SUCCESS) { |
| 2202 | cp->character = c.character; |
| 2203 | cp->behaviour = c.behaviour; |
| 2204 | cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | |
| 2205 | KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED | |
| 2206 | KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 | |
| 2207 | KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 | |
| 2208 | KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV | |
| 2209 | KVM_PPC_CPU_CHAR_BR_HINT_HONOURED | |
| 2210 | KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF | |
Suraj Jitindar Singh | 2b57ecd | 2019-03-01 14:25:16 +1100 | [diff] [blame] | 2211 | KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS | |
| 2212 | KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST; |
Paul Mackerras | 3214d01 | 2018-01-15 16:06:47 +1100 | [diff] [blame] | 2213 | cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | |
| 2214 | KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR | |
Suraj Jitindar Singh | 2b57ecd | 2019-03-01 14:25:16 +1100 | [diff] [blame] | 2215 | KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR | |
| 2216 | KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE; |
Paul Mackerras | 3214d01 | 2018-01-15 16:06:47 +1100 | [diff] [blame] | 2217 | } |
| 2218 | return 0; |
| 2219 | } |
| 2220 | #else |
| 2221 | static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp) |
| 2222 | { |
| 2223 | return -ENOTTY; |
| 2224 | } |
| 2225 | #endif |
| 2226 | |
| 2227 | static inline bool have_fw_feat(struct device_node *fw_features, |
| 2228 | const char *state, const char *name) |
| 2229 | { |
| 2230 | struct device_node *np; |
| 2231 | bool r = false; |
| 2232 | |
| 2233 | np = of_get_child_by_name(fw_features, name); |
| 2234 | if (np) { |
| 2235 | r = of_property_read_bool(np, state); |
| 2236 | of_node_put(np); |
| 2237 | } |
| 2238 | return r; |
| 2239 | } |
| 2240 | |
| 2241 | static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp) |
| 2242 | { |
| 2243 | struct device_node *np, *fw_features; |
| 2244 | int r; |
| 2245 | |
| 2246 | memset(cp, 0, sizeof(*cp)); |
| 2247 | r = pseries_get_cpu_char(cp); |
| 2248 | if (r != -ENOTTY) |
| 2249 | return r; |
| 2250 | |
| 2251 | np = of_find_node_by_name(NULL, "ibm,opal"); |
| 2252 | if (np) { |
| 2253 | fw_features = of_get_child_by_name(np, "fw-features"); |
| 2254 | of_node_put(np); |
| 2255 | if (!fw_features) |
| 2256 | return 0; |
| 2257 | if (have_fw_feat(fw_features, "enabled", |
| 2258 | "inst-spec-barrier-ori31,31,0")) |
| 2259 | cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31; |
| 2260 | if (have_fw_feat(fw_features, "enabled", |
| 2261 | "fw-bcctrl-serialized")) |
| 2262 | cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED; |
| 2263 | if (have_fw_feat(fw_features, "enabled", |
| 2264 | "inst-l1d-flush-ori30,30,0")) |
| 2265 | cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30; |
| 2266 | if (have_fw_feat(fw_features, "enabled", |
| 2267 | "inst-l1d-flush-trig2")) |
| 2268 | cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2; |
| 2269 | if (have_fw_feat(fw_features, "enabled", |
| 2270 | "fw-l1d-thread-split")) |
| 2271 | cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV; |
| 2272 | if (have_fw_feat(fw_features, "enabled", |
| 2273 | "fw-count-cache-disabled")) |
| 2274 | cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS; |
Suraj Jitindar Singh | 2b57ecd | 2019-03-01 14:25:16 +1100 | [diff] [blame] | 2275 | if (have_fw_feat(fw_features, "enabled", |
| 2276 | "fw-count-cache-flush-bcctr2,0,0")) |
| 2277 | cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST; |
Paul Mackerras | 3214d01 | 2018-01-15 16:06:47 +1100 | [diff] [blame] | 2278 | cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | |
| 2279 | KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED | |
| 2280 | KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 | |
| 2281 | KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 | |
| 2282 | KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV | |
Suraj Jitindar Singh | 2b57ecd | 2019-03-01 14:25:16 +1100 | [diff] [blame] | 2283 | KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS | |
| 2284 | KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST; |
Paul Mackerras | 3214d01 | 2018-01-15 16:06:47 +1100 | [diff] [blame] | 2285 | |
| 2286 | if (have_fw_feat(fw_features, "enabled", |
| 2287 | "speculation-policy-favor-security")) |
| 2288 | cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY; |
| 2289 | if (!have_fw_feat(fw_features, "disabled", |
| 2290 | "needs-l1d-flush-msr-pr-0-to-1")) |
| 2291 | cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR; |
| 2292 | if (!have_fw_feat(fw_features, "disabled", |
| 2293 | "needs-spec-barrier-for-bound-checks")) |
| 2294 | cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR; |
Suraj Jitindar Singh | 2b57ecd | 2019-03-01 14:25:16 +1100 | [diff] [blame] | 2295 | if (have_fw_feat(fw_features, "enabled", |
| 2296 | "needs-count-cache-flush-on-context-switch")) |
| 2297 | cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE; |
Paul Mackerras | 3214d01 | 2018-01-15 16:06:47 +1100 | [diff] [blame] | 2298 | cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | |
| 2299 | KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR | |
Suraj Jitindar Singh | 2b57ecd | 2019-03-01 14:25:16 +1100 | [diff] [blame] | 2300 | KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR | |
| 2301 | KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE; |
Paul Mackerras | 3214d01 | 2018-01-15 16:06:47 +1100 | [diff] [blame] | 2302 | |
| 2303 | of_node_put(fw_features); |
| 2304 | } |
| 2305 | |
| 2306 | return 0; |
| 2307 | } |
| 2308 | #endif |
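
An illustrative userspace sketch, not part of this file, of querying the speculation-safety information that the comment before pseries_get_cpu_char() describes, via the KVM_PPC_GET_CPU_CHAR VM ioctl handled further below. A powerpc host and an existing vm_fd are assumed; only bits set in the *_mask fields are meaningful.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int report_l1d_flush_advice(int vm_fd)
{
	struct kvm_ppc_cpu_char cc;

	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) < 0)
		return -1;

	if ((cc.behaviour_mask & KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR) &&
	    (cc.behaviour & KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR))
		printf("guest kernels should flush the L1D cache on kernel-to-user transitions\n");
	else
		printf("no L1D flush advised on kernel-to-user transitions\n");

	return 0;
}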
| 2309 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2310 | long kvm_arch_vm_ioctl(struct file *filp, |
| 2311 | unsigned int ioctl, unsigned long arg) |
| 2312 | { |
Scott Wood | 5df554ad | 2013-04-12 14:08:46 +0000 | [diff] [blame] | 2313 | struct kvm *kvm __maybe_unused = filp->private_data; |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2314 | void __user *argp = (void __user *)arg; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2315 | long r; |
| 2316 | |
| 2317 | switch (ioctl) { |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2318 | case KVM_PPC_GET_PVINFO: { |
| 2319 | struct kvm_ppc_pvinfo pvinfo; |
Vasiliy Kulikov | d8cdddc | 2010-10-30 13:04:24 +0400 | [diff] [blame] | 2320 | memset(&pvinfo, 0, sizeof(pvinfo)); |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2321 | r = kvm_vm_ioctl_get_pvinfo(&pvinfo); |
| 2322 | if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) { |
| 2323 | r = -EFAULT; |
| 2324 | goto out; |
| 2325 | } |
| 2326 | |
| 2327 | break; |
| 2328 | } |
Paul Mackerras | 76d837a | 2017-05-11 14:31:59 +1000 | [diff] [blame] | 2329 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
Alexey Kardashevskiy | 58ded42 | 2016-03-01 17:54:40 +1100 | [diff] [blame] | 2330 | case KVM_CREATE_SPAPR_TCE_64: { |
| 2331 | struct kvm_create_spapr_tce_64 create_tce_64; |
| 2332 | |
| 2333 | r = -EFAULT; |
| 2334 | if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64))) |
| 2335 | goto out; |
| 2336 | if (create_tce_64.flags) { |
| 2337 | r = -EINVAL; |
| 2338 | goto out; |
| 2339 | } |
| 2340 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); |
| 2341 | goto out; |
| 2342 | } |
David Gibson | 54738c0 | 2011-06-29 00:22:41 +0000 | [diff] [blame] | 2343 | case KVM_CREATE_SPAPR_TCE: { |
| 2344 | struct kvm_create_spapr_tce create_tce; |
Alexey Kardashevskiy | 58ded42 | 2016-03-01 17:54:40 +1100 | [diff] [blame] | 2345 | struct kvm_create_spapr_tce_64 create_tce_64; |
David Gibson | 54738c0 | 2011-06-29 00:22:41 +0000 | [diff] [blame] | 2346 | |
| 2347 | r = -EFAULT; |
| 2348 | if (copy_from_user(&create_tce, argp, sizeof(create_tce))) |
| 2349 | goto out; |
Alexey Kardashevskiy | 58ded42 | 2016-03-01 17:54:40 +1100 | [diff] [blame] | 2350 | |
| 2351 | create_tce_64.liobn = create_tce.liobn; |
| 2352 | create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K; |
| 2353 | create_tce_64.offset = 0; |
| 2354 | create_tce_64.size = create_tce.window_size >> |
| 2355 | IOMMU_PAGE_SHIFT_4K; |
| 2356 | create_tce_64.flags = 0; |
| 2357 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); |
David Gibson | 54738c0 | 2011-06-29 00:22:41 +0000 | [diff] [blame] | 2358 | goto out; |
| 2359 | } |
Paul Mackerras | 76d837a | 2017-05-11 14:31:59 +1000 | [diff] [blame] | 2360 | #endif |
| 2361 | #ifdef CONFIG_PPC_BOOK3S_64 |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 2362 | case KVM_PPC_GET_SMMU_INFO: { |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 2363 | struct kvm_ppc_smmu_info info; |
Aneesh Kumar K.V | cbbc58d | 2013-10-07 22:18:01 +0530 | [diff] [blame] | 2364 | struct kvm *kvm = filp->private_data; |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 2365 | |
| 2366 | memset(&info, 0, sizeof(info)); |
Aneesh Kumar K.V | cbbc58d | 2013-10-07 22:18:01 +0530 | [diff] [blame] | 2367 | r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 2368 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) |
| 2369 | r = -EFAULT; |
| 2370 | break; |
| 2371 | } |
Michael Ellerman | 8e591cb | 2013-04-17 20:30:00 +0000 | [diff] [blame] | 2372 | case KVM_PPC_RTAS_DEFINE_TOKEN: { |
| 2373 | struct kvm *kvm = filp->private_data; |
| 2374 | |
| 2375 | r = kvm_vm_ioctl_rtas_define_token(kvm, argp); |
| 2376 | break; |
| 2377 | } |
Paul Mackerras | c927013 | 2017-01-30 21:21:41 +1100 | [diff] [blame] | 2378 | case KVM_PPC_CONFIGURE_V3_MMU: { |
| 2379 | struct kvm *kvm = filp->private_data; |
| 2380 | struct kvm_ppc_mmuv3_cfg cfg; |
| 2381 | |
| 2382 | r = -EINVAL; |
| 2383 | if (!kvm->arch.kvm_ops->configure_mmu) |
| 2384 | goto out; |
| 2385 | r = -EFAULT; |
| 2386 | if (copy_from_user(&cfg, argp, sizeof(cfg))) |
| 2387 | goto out; |
| 2388 | r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg); |
| 2389 | break; |
| 2390 | } |
| 2391 | case KVM_PPC_GET_RMMU_INFO: { |
| 2392 | struct kvm *kvm = filp->private_data; |
| 2393 | struct kvm_ppc_rmmu_info info; |
| 2394 | |
| 2395 | r = -EINVAL; |
| 2396 | if (!kvm->arch.kvm_ops->get_rmmu_info) |
| 2397 | goto out; |
| 2398 | r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info); |
| 2399 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) |
| 2400 | r = -EFAULT; |
| 2401 | break; |
| 2402 | } |
Paul Mackerras | 3214d01 | 2018-01-15 16:06:47 +1100 | [diff] [blame] | 2403 | case KVM_PPC_GET_CPU_CHAR: { |
| 2404 | struct kvm_ppc_cpu_char cpuchar; |
| 2405 | |
| 2406 | r = kvmppc_get_cpu_char(&cpuchar); |
| 2407 | if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar))) |
| 2408 | r = -EFAULT; |
| 2409 | break; |
| 2410 | } |
Bharata B Rao | 2294568 | 2019-11-25 08:36:30 +0530 | [diff] [blame] | 2411 | case KVM_PPC_SVM_OFF: { |
| 2412 | struct kvm *kvm = filp->private_data; |
| 2413 | |
| 2414 | r = 0; |
| 2415 | if (!kvm->arch.kvm_ops->svm_off) |
| 2416 | goto out; |
| 2417 | |
| 2418 | r = kvm->arch.kvm_ops->svm_off(kvm); |
| 2419 | break; |
| 2420 | } |
Aneesh Kumar K.V | cbbc58d | 2013-10-07 22:18:01 +0530 | [diff] [blame] | 2421 | default: { |
| 2422 | struct kvm *kvm = filp->private_data; |
| 2423 | r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); |
| 2424 | } |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2425 | #else /* CONFIG_PPC_BOOK3S_64 */ |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2426 | default: |
Avi Kivity | 367e131 | 2009-08-26 14:57:07 +0300 | [diff] [blame] | 2427 | r = -ENOTTY; |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2428 | #endif |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2429 | } |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2430 | out: |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2431 | return r; |
| 2432 | } |
| 2433 | |
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2434 | static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)]; |
| 2435 | static unsigned long nr_lpids; |
| 2436 | |
| 2437 | long kvmppc_alloc_lpid(void) |
| 2438 | { |
| 2439 | long lpid; |
| 2440 | |
| 2441 | do { |
| 2442 | lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS); |
| 2443 | if (lpid >= nr_lpids) { |
| 2444 | pr_err("%s: No LPIDs free\n", __func__); |
| 2445 | return -ENOMEM; |
| 2446 | } |
| 2447 | } while (test_and_set_bit(lpid, lpid_inuse)); |
| 2448 | |
| 2449 | return lpid; |
| 2450 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2451 | EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid); |
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2452 | |
| 2453 | void kvmppc_claim_lpid(long lpid) |
| 2454 | { |
| 2455 | set_bit(lpid, lpid_inuse); |
| 2456 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2457 | EXPORT_SYMBOL_GPL(kvmppc_claim_lpid); |
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2458 | |
| 2459 | void kvmppc_free_lpid(long lpid) |
| 2460 | { |
| 2461 | clear_bit(lpid, lpid_inuse); |
| 2462 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2463 | EXPORT_SYMBOL_GPL(kvmppc_free_lpid); |
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2464 | |
| 2465 | void kvmppc_init_lpid(unsigned long nr_lpids_param) |
| 2466 | { |
| 2467 | nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); |
| 2468 | memset(lpid_inuse, 0, sizeof(lpid_inuse)); |
| 2469 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2470 | EXPORT_SYMBOL_GPL(kvmppc_init_lpid); |
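
A sketch of the intended usage pattern for the LPID helpers above, written in kernel context; kvmppc_setup_partition_example() and its teardown counterpart are made-up names standing in for the real per-VM init/destroy paths, and kvm->arch.lpid is where the existing backends record the allocated partition ID.

static int kvmppc_setup_partition_example(struct kvm *kvm)
{
	long lpid = kvmppc_alloc_lpid();	/* find and claim a free bit */

	if (lpid < 0)
		return lpid;			/* -ENOMEM once the pool is exhausted */

	kvm->arch.lpid = lpid;
	/* ... program LPIDR / the partition table for this guest ... */
	return 0;
}

static void kvmppc_teardown_partition_example(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);	/* return the bit to the pool */
}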
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2471 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2472 | int kvm_arch_init(void *opaque) |
| 2473 | { |
| 2474 | return 0; |
| 2475 | } |
| 2476 | |
Paolo Bonzini | 478d6686 | 2014-08-05 11:29:07 +0200 | [diff] [blame] | 2477 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr); |