/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
Alexander Graf | 03d25c5 | 2012-08-10 12:28:50 +0200 | [diff] [blame] | 120 | /* Make sure we process requests preemptable */ |
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/*
			 * interrupts got enabled in between, so we
			 * are back at square 1
			 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
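
/*
 * A minimal usage sketch for kvmppc_prepare_to_enter(), assuming a
 * simplified subarch vcpu_run caller (illustrative only; the real call
 * sites live in the booke and book3s entry paths):
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;	// helper already re-enabled interrupts
 *	// r == 1: interrupts are hard-disabled and guest_enter_irqoff()
 *	// has run, so fall through to the low-level guest entry.
 */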

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif
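
/*
 * Worked example for kvmppc_swab_shared(): it is invoked from the
 * KVM_HC_PPC_MAP_MAGIC_PAGE handler below when the endianness implied by
 * vcpu->arch.intr_msr no longer matches the current layout of the shared
 * page, and it byte-reverses each field in place, e.g.
 * swab64(0x0123456789abcdef) == 0xefcdab8967452301.
 */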

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
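
/*
 * Guest-side shape of the hypercall ABI dispatched above, as an
 * illustrative sketch only (the actual guest sequence lives in the guest
 * kernel): the call number travels in r11 and up to four parameters in
 * r3..r6; the handler's status is passed back to the guest (assumed here
 * to land in r3, as arranged by the callers of this function), with a
 * second return value placed in r4 via kvmppc_set_gpr().
 *
 *	r11 = KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE);
 *	r3  = magic_page_pa;
 *	r4  = magic_page_ea;
 *	<trap to hypervisor>
 *	status   = r3;	// EV_SUCCESS or EV_UNIMPLEMENTED
 *	features = r4;	// e.g. KVM_MAGIC_FEAT_SR
 */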

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
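
/*
 * Minimal sketch of how the emulation paths are expected to use
 * kvmppc_ld()/kvmppc_st() (illustrative only; kvmppc_get_pc() is assumed
 * here merely to obtain an example effective address):
 *
 *	u32 insn;
 *	ulong ea = kvmppc_get_pc(vcpu);
 *	int r = kvmppc_ld(vcpu, &ea, sizeof(insn), &insn, false);
 *	if (r == EMULATE_DO_MMIO)
 *		// no memslot backs the address: punt to userspace MMIO
 *	else if (r < 0)
 *		// translation failed (e.g. -EPERM, -ENOEXEC)
 *	// on EMULATE_DONE, ea has been updated to the translated address
 */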

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else	if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		/* fallthrough */
	case KVM_CAP_SPAPR_TCE_VFIO:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_PPC_HTM:
		r = hv_enabled &&
		    (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM_COMP);
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
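
/*
 * Userspace reaches the capability switch above through the
 * KVM_CHECK_EXTENSION ioctl. A hedged sketch (error handling elided; the
 * queried capability is just an example):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int smt = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
 *
 * Querying on /dev/kvm arrives here with kvm == NULL, so the module-load
 * guess is used; querying on a VM fd supplies the VM, so the answer
 * reflects the actual HV/PR type.
 */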

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xive_enabled())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}
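
/*
 * Worked example for the two offset helpers above: on a big-endian host
 * the memory image of a VSR matches the architectural numbering, so the
 * offset equals the index. On a little-endian host the doublewords are
 * stored in reverse order, so dword index 0 maps to offset 1 (and vice
 * versa), while word index 0 maps to offset 3, 1 to 2, and so on.
 * Out-of-range indices return -1, which the setters below treat as a
 * no-op.
 */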

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
		u64 gpr)
{
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	u32 hi, lo;
	u32 di;

#ifdef __BIG_ENDIAN
	hi = gpr >> 32;
	lo = gpr & 0xffffffff;
#else
	lo = gpr >> 32;
	hi = gpr & 0xffffffff;
#endif

	di = 2 - vcpu->arch.mmio_vmx_copy_nums;		/* doubleword index */
	if (di > 1)
		return;

	if (vcpu->arch.mmio_host_swabbed)
		di = 1 - di;

	VCPU_VSX_VR(vcpu, index).u[di * 2] = hi;
	VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */
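
/*
 * The helpers above bounce the value through fr0 so the FPU performs the
 * IEEE 754 format conversion itself (lfs widens on load, stfd stores the
 * double; the reverse pair narrows). Worked example: sp_to_dp(0x3f800000)
 * yields 0x3ff0000000000000, i.e. single-precision 1.0f widened to
 * double-precision 1.0.
 */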
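/*
 * Completion of an MMIO load: after userspace services a KVM_EXIT_MMIO
 * and fills run->mmio.data, the value is byteswapped if host and guest
 * endianness differ, optionally widened from single to double precision
 * or sign-extended, and finally routed into the GPR/FPR/QPR/VSX/VMX
 * register recorded in vcpu->arch.io_gpr.
 */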
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 990 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, |
| 991 | struct kvm_run *run) |
| 992 | { |
Denis Kirjanov | 69b6183 | 2010-06-11 11:23:26 +0000 | [diff] [blame] | 993 | u64 uninitialized_var(gpr); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 994 | |
Alexander Graf | 8e5b26b | 2010-01-08 02:58:01 +0100 | [diff] [blame] | 995 | if (run->mmio.len > sizeof(gpr)) { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 996 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); |
| 997 | return; |
| 998 | } |
| 999 | |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1000 | if (!vcpu->arch.mmio_host_swabbed) { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1001 | switch (run->mmio.len) { |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1002 | case 8: gpr = *(u64 *)run->mmio.data; break; |
Alexander Graf | 8e5b26b | 2010-01-08 02:58:01 +0100 | [diff] [blame] | 1003 | case 4: gpr = *(u32 *)run->mmio.data; break; |
| 1004 | case 2: gpr = *(u16 *)run->mmio.data; break; |
| 1005 | case 1: gpr = *(u8 *)run->mmio.data; break; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1006 | } |
| 1007 | } else { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1008 | switch (run->mmio.len) { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1009 | case 8: gpr = swab64(*(u64 *)run->mmio.data); break; |
| 1010 | case 4: gpr = swab32(*(u32 *)run->mmio.data); break; |
| 1011 | case 2: gpr = swab16(*(u16 *)run->mmio.data); break; |
Alexander Graf | 8e5b26b | 2010-01-08 02:58:01 +0100 | [diff] [blame] | 1012 | case 1: gpr = *(u8 *)run->mmio.data; break; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1013 | } |
| 1014 | } |
Alexander Graf | 8e5b26b | 2010-01-08 02:58:01 +0100 | [diff] [blame] | 1015 | |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1016 | /* conversion between single and double precision */ |
| 1017 | if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) |
| 1018 | gpr = sp_to_dp(gpr); |
| 1019 | |
Alexander Graf | 3587d53 | 2010-02-19 11:00:30 +0100 | [diff] [blame] | 1020 | if (vcpu->arch.mmio_sign_extend) { |
| 1021 | switch (run->mmio.len) { |
| 1022 | #ifdef CONFIG_PPC64 |
| 1023 | case 4: |
| 1024 | gpr = (s64)(s32)gpr; |
| 1025 | break; |
| 1026 | #endif |
| 1027 | case 2: |
| 1028 | gpr = (s64)(s16)gpr; |
| 1029 | break; |
| 1030 | case 1: |
| 1031 | gpr = (s64)(s8)gpr; |
| 1032 | break; |
| 1033 | } |
| 1034 | } |
| 1035 | |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1036 | switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { |
| 1037 | case KVM_MMIO_REG_GPR: |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1038 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); |
| 1039 | break; |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1040 | case KVM_MMIO_REG_FPR: |
Paul Mackerras | efff191 | 2013-10-15 20:43:02 +1100 | [diff] [blame] | 1041 | VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1042 | break; |
Alexander Graf | 287d561 | 2010-04-01 15:33:21 +0200 | [diff] [blame] | 1043 | #ifdef CONFIG_PPC_BOOK3S |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1044 | case KVM_MMIO_REG_QPR: |
| 1045 | vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1046 | break; |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1047 | case KVM_MMIO_REG_FQPR: |
Paul Mackerras | efff191 | 2013-10-15 20:43:02 +1100 | [diff] [blame] | 1048 | VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; |
Alexander Graf | b3c5d3c | 2012-01-07 02:07:38 +0100 | [diff] [blame] | 1049 | vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1050 | break; |
Alexander Graf | 287d561 | 2010-04-01 15:33:21 +0200 | [diff] [blame] | 1051 | #endif |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1052 | #ifdef CONFIG_VSX |
| 1053 | case KVM_MMIO_REG_VSX: |
| 1054 | if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD) |
| 1055 | kvmppc_set_vsr_dword(vcpu, gpr); |
| 1056 | else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD) |
| 1057 | kvmppc_set_vsr_word(vcpu, gpr); |
| 1058 | else if (vcpu->arch.mmio_vsx_copy_type == |
| 1059 | KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) |
| 1060 | kvmppc_set_vsr_dword_dump(vcpu, gpr); |
| 1061 | break; |
| 1062 | #endif |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1063 | #ifdef CONFIG_ALTIVEC |
| 1064 | case KVM_MMIO_REG_VMX: |
| 1065 | kvmppc_set_vmx_dword(vcpu, gpr); |
| 1066 | break; |
| 1067 | #endif |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1068 | default: |
| 1069 | BUG(); |
| 1070 | } |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1071 | } |
| 1072 | |
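| | /*
| |  * Common MMIO load path: record the access in the kvm_run MMIO block
| |  * and in the vcpu, then try the in-kernel io bus first; only when no
| |  * kernel device claims the address do we return EMULATE_DO_MMIO to
| |  * bounce the access out to userspace.
| |  */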
Paul Mackerras | eb8b056 | 2016-05-05 16:17:10 +1000 | [diff] [blame] | 1073 | static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1074 | unsigned int rt, unsigned int bytes, |
| 1075 | int is_default_endian, int sign_extend) |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1076 | { |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1077 | int idx, ret; |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1078 | bool host_swabbed; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1079 | |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1080 | /* Pity C doesn't have a logical XOR operator */ |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1081 | if (kvmppc_need_byteswap(vcpu)) { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1082 | host_swabbed = is_default_endian; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1083 | } else { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1084 | host_swabbed = !is_default_endian; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1085 | } |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1086 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1087 | if (bytes > sizeof(run->mmio.data)) { |
| 1088 | printk(KERN_ERR "%s: bad MMIO length: %u\n", __func__, bytes);
| 1089 | return EMULATE_FAIL;
| 1090 | } |
| 1091 | |
| 1092 | run->mmio.phys_addr = vcpu->arch.paddr_accessed; |
| 1093 | run->mmio.len = bytes; |
| 1094 | run->mmio.is_write = 0; |
| 1095 | |
| 1096 | vcpu->arch.io_gpr = rt; |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1097 | vcpu->arch.mmio_host_swabbed = host_swabbed; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1098 | vcpu->mmio_needed = 1; |
| 1099 | vcpu->mmio_is_write = 0; |
Paul Mackerras | eb8b056 | 2016-05-05 16:17:10 +1000 | [diff] [blame] | 1100 | vcpu->arch.mmio_sign_extend = sign_extend; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1101 | |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1102 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 1103 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 1104 | ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1105 | bytes, &run->mmio.data); |
| 1106 | |
| 1107 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 1108 | |
| 1109 | if (!ret) { |
Alexander Graf | 0e673fb | 2012-10-09 00:06:20 +0200 | [diff] [blame] | 1110 | kvmppc_complete_mmio_load(vcpu, run); |
| 1111 | vcpu->mmio_needed = 0; |
| 1112 | return EMULATE_DONE; |
| 1113 | } |
| 1114 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1115 | return EMULATE_DO_MMIO; |
| 1116 | } |
Paul Mackerras | eb8b056 | 2016-05-05 16:17:10 +1000 | [diff] [blame] | 1117 | |
| 1118 | int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1119 | unsigned int rt, unsigned int bytes, |
| 1120 | int is_default_endian) |
| 1121 | { |
| 1122 | return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0); |
| 1123 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 1124 | EXPORT_SYMBOL_GPL(kvmppc_handle_load); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1125 | |
Alexander Graf | 3587d53 | 2010-02-19 11:00:30 +0100 | [diff] [blame] | 1126 | /* Same as above, but sign extends */ |
| 1127 | int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1128 | unsigned int rt, unsigned int bytes, |
| 1129 | int is_default_endian) |
Alexander Graf | 3587d53 | 2010-02-19 11:00:30 +0100 | [diff] [blame] | 1130 | { |
Paul Mackerras | eb8b056 | 2016-05-05 16:17:10 +1000 | [diff] [blame] | 1131 | return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1); |
Alexander Graf | 3587d53 | 2010-02-19 11:00:30 +0100 | [diff] [blame] | 1132 | } |
| 1133 | |
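| | /*
| |  * VSX MMIO emulation splits one vector access into up to four
| |  * element-sized accesses, advancing paddr_accessed and
| |  * mmio_vsx_offset after each one; if an element has to go out to
| |  * userspace, the loop is resumed from kvm_arch_vcpu_ioctl_run().
| |  */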
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1134 | #ifdef CONFIG_VSX |
| 1135 | int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1136 | unsigned int rt, unsigned int bytes, |
| 1137 | int is_default_endian, int mmio_sign_extend) |
| 1138 | { |
| 1139 | enum emulation_result emulated = EMULATE_DONE; |
| 1140 | |
Paul Mackerras | 9aa6825 | 2017-11-20 19:56:27 +1100 | [diff] [blame] | 1141 | /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
| 1142 | if (vcpu->arch.mmio_vsx_copy_nums > 4) |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1143 | return EMULATE_FAIL; |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1144 | |
| 1145 | while (vcpu->arch.mmio_vsx_copy_nums) { |
| 1146 | emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, |
| 1147 | is_default_endian, mmio_sign_extend); |
| 1148 | |
| 1149 | if (emulated != EMULATE_DONE) |
| 1150 | break; |
| 1151 | |
| 1152 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1153 | |
| 1154 | vcpu->arch.mmio_vsx_copy_nums--; |
| 1155 | vcpu->arch.mmio_vsx_offset++; |
| 1156 | } |
| 1157 | return emulated; |
| 1158 | } |
| 1159 | #endif /* CONFIG_VSX */ |
| 1160 | |
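| | /*
| |  * Mirror of the load path: fold the value into run->mmio.data in the
| |  * right byte order, then try the in-kernel io bus before falling back
| |  * to a userspace exit.
| |  */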
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1161 | int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1162 | u64 val, unsigned int bytes, int is_default_endian) |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1163 | { |
| 1164 | void *data = run->mmio.data; |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1165 | int idx, ret; |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1166 | bool host_swabbed; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1167 | |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1168 | /* Pity C doesn't have a logical XOR operator */ |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1169 | if (kvmppc_need_byteswap(vcpu)) { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1170 | host_swabbed = is_default_endian; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1171 | } else { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1172 | host_swabbed = !is_default_endian; |
Cédric Le Goater | 7360177 | 2014-01-09 11:51:16 +0100 | [diff] [blame] | 1173 | } |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1174 | |
| 1175 | if (bytes > sizeof(run->mmio.data)) { |
| 1176 | printk(KERN_ERR "%s: bad MMIO length: %u\n", __func__, bytes);
| 1177 | return EMULATE_FAIL;
| 1178 | } |
| 1179 | |
| 1180 | run->mmio.phys_addr = vcpu->arch.paddr_accessed; |
| 1181 | run->mmio.len = bytes; |
| 1182 | run->mmio.is_write = 1; |
| 1183 | vcpu->mmio_needed = 1; |
| 1184 | vcpu->mmio_is_write = 1; |
| 1185 | |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1186 | if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) |
| 1187 | val = dp_to_sp(val); |
| 1188 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1189 | /* Store the value in the lowest bytes of 'data'. */
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1190 | if (!host_swabbed) { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1191 | switch (bytes) { |
Alexander Graf | b104d06 | 2010-02-19 11:00:29 +0100 | [diff] [blame] | 1192 | case 8: *(u64 *)data = val; break; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1193 | case 4: *(u32 *)data = val; break; |
| 1194 | case 2: *(u16 *)data = val; break; |
| 1195 | case 1: *(u8 *)data = val; break; |
| 1196 | } |
| 1197 | } else { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1198 | switch (bytes) { |
David Gibson | d078eed | 2015-02-03 16:36:24 +1100 | [diff] [blame] | 1199 | case 8: *(u64 *)data = swab64(val); break; |
| 1200 | case 4: *(u32 *)data = swab32(val); break; |
| 1201 | case 2: *(u16 *)data = swab16(val); break; |
| 1202 | case 1: *(u8 *)data = val; break; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1203 | } |
| 1204 | } |
| 1205 | |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1206 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 1207 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 1208 | ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, |
Scott Wood | ed840ee | 2013-04-26 14:53:39 +0000 | [diff] [blame] | 1209 | bytes, &run->mmio.data); |
| 1210 | |
| 1211 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 1212 | |
| 1213 | if (!ret) { |
Alexander Graf | 0e673fb | 2012-10-09 00:06:20 +0200 | [diff] [blame] | 1214 | vcpu->mmio_needed = 0; |
| 1215 | return EMULATE_DONE; |
| 1216 | } |
| 1217 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1218 | return EMULATE_DO_MMIO; |
| 1219 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 1220 | EXPORT_SYMBOL_GPL(kvmppc_handle_store); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1221 | |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1222 | #ifdef CONFIG_VSX |
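| | /*
| |  * Pull the next doubleword or word to be stored out of the source
| |  * VSR.  The low 32 VSRs overlay the FP registers and the high 32
| |  * overlay the Altivec registers; mmio_vsx_tx_sx_enabled selects the
| |  * Altivec half.
| |  */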
| 1223 | static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) |
| 1224 | { |
| 1225 | u32 dword_offset, word_offset; |
| 1226 | union kvmppc_one_reg reg; |
| 1227 | int vsx_offset = 0; |
| 1228 | int copy_type = vcpu->arch.mmio_vsx_copy_type; |
| 1229 | int result = 0; |
| 1230 | |
| 1231 | switch (copy_type) { |
| 1232 | case KVMPPC_VSX_COPY_DWORD: |
| 1233 | vsx_offset = |
| 1234 | kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); |
| 1235 | |
| 1236 | if (vsx_offset == -1) { |
| 1237 | result = -1; |
| 1238 | break; |
| 1239 | } |
| 1240 | |
| 1241 | if (!vcpu->arch.mmio_vsx_tx_sx_enabled) { |
| 1242 | *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset); |
| 1243 | } else { |
| 1244 | reg.vval = VCPU_VSX_VR(vcpu, rs); |
| 1245 | *val = reg.vsxval[vsx_offset]; |
| 1246 | } |
| 1247 | break; |
| 1248 | |
| 1249 | case KVMPPC_VSX_COPY_WORD: |
| 1250 | vsx_offset = |
| 1251 | kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); |
| 1252 | |
| 1253 | if (vsx_offset == -1) { |
| 1254 | result = -1; |
| 1255 | break; |
| 1256 | } |
| 1257 | |
| 1258 | if (!vcpu->arch.mmio_vsx_tx_sx_enabled) { |
| 1259 | dword_offset = vsx_offset / 2; |
| 1260 | word_offset = vsx_offset % 2; |
| 1261 | reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset); |
| 1262 | *val = reg.vsx32val[word_offset]; |
| 1263 | } else { |
| 1264 | reg.vval = VCPU_VSX_VR(vcpu, rs); |
| 1265 | *val = reg.vsx32val[vsx_offset]; |
| 1266 | } |
| 1267 | break; |
| 1268 | |
| 1269 | default: |
| 1270 | result = -1; |
| 1271 | break; |
| 1272 | } |
| 1273 | |
| 1274 | return result; |
| 1275 | } |
| 1276 | |
| 1277 | int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1278 | int rs, unsigned int bytes, int is_default_endian) |
| 1279 | { |
| 1280 | u64 val; |
| 1281 | enum emulation_result emulated = EMULATE_DONE; |
| 1282 | |
| 1283 | vcpu->arch.io_gpr = rs; |
| 1284 | |
Paul Mackerras | 9aa6825 | 2017-11-20 19:56:27 +1100 | [diff] [blame] | 1285 | /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
| 1286 | if (vcpu->arch.mmio_vsx_copy_nums > 4) |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1287 | return EMULATE_FAIL; |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1288 | |
| 1289 | while (vcpu->arch.mmio_vsx_copy_nums) { |
| 1290 | if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) |
| 1291 | return EMULATE_FAIL; |
| 1292 | |
| 1293 | emulated = kvmppc_handle_store(run, vcpu, |
| 1294 | val, bytes, is_default_endian); |
| 1295 | |
| 1296 | if (emulated != EMULATE_DONE) |
| 1297 | break; |
| 1298 | |
| 1299 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1300 | |
| 1301 | vcpu->arch.mmio_vsx_copy_nums--; |
| 1302 | vcpu->arch.mmio_vsx_offset++; |
| 1303 | } |
| 1304 | |
| 1305 | return emulated; |
| 1306 | } |
| 1307 | |
| 1308 | static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, |
| 1309 | struct kvm_run *run) |
| 1310 | { |
| 1311 | enum emulation_result emulated = EMULATE_FAIL; |
| 1312 | int r; |
| 1313 | |
| 1314 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1315 | |
| 1316 | if (!vcpu->mmio_is_write) { |
| 1317 | emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr, |
| 1318 | run->mmio.len, 1, vcpu->arch.mmio_sign_extend); |
| 1319 | } else { |
| 1320 | emulated = kvmppc_handle_vsx_store(run, vcpu, |
| 1321 | vcpu->arch.io_gpr, run->mmio.len, 1); |
| 1322 | } |
| 1323 | |
| 1324 | switch (emulated) { |
| 1325 | case EMULATE_DO_MMIO: |
| 1326 | run->exit_reason = KVM_EXIT_MMIO; |
| 1327 | r = RESUME_HOST; |
| 1328 | break; |
| 1329 | case EMULATE_FAIL: |
| 1330 | pr_info("KVM: MMIO emulation failed (VSX repeat)\n"); |
| 1331 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
| 1332 | run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; |
| 1333 | r = RESUME_HOST; |
| 1334 | break; |
| 1335 | default: |
| 1336 | r = RESUME_GUEST; |
| 1337 | break; |
| 1338 | } |
| 1339 | return r; |
| 1340 | } |
| 1341 | #endif /* CONFIG_VSX */ |
| 1342 | |
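| | /*
| |  * Altivec (VMX) quadword MMIO is emulated as two 8-byte accesses;
| |  * mmio_vmx_copy_nums counts the halves still outstanding.
| |  */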
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1343 | #ifdef CONFIG_ALTIVEC |
| 1344 | /* handle quadword load access in two halves */ |
| 1345 | int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1346 | unsigned int rt, int is_default_endian) |
| 1347 | { |
| 1348 | enum emulation_result emulated; |
| 1349 | |
| 1350 | while (vcpu->arch.mmio_vmx_copy_nums) { |
| 1351 | emulated = __kvmppc_handle_load(run, vcpu, rt, 8, |
| 1352 | is_default_endian, 0); |
| 1353 | |
| 1354 | if (emulated != EMULATE_DONE) |
| 1355 | break; |
| 1356 | |
| 1357 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1358 | vcpu->arch.mmio_vmx_copy_nums--; |
| 1359 | } |
| 1360 | |
| 1361 | return emulated; |
| 1362 | } |
| 1363 | |
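| | /* Pick out the 64-bit half of the source VMX register for this pass. */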
| 1364 | static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val) |
| 1365 | { |
| 1366 | vector128 vrs = VCPU_VSX_VR(vcpu, rs); |
| 1367 | u32 di; |
| 1368 | u64 w0, w1; |
| 1369 | |
| 1370 | di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */ |
| 1371 | if (di > 1) |
| 1372 | return -1; |
| 1373 | |
| 1374 | if (vcpu->arch.mmio_host_swabbed) |
| 1375 | di = 1 - di; |
| 1376 | |
| 1377 | w0 = vrs.u[di * 2]; |
| 1378 | w1 = vrs.u[di * 2 + 1]; |
| 1379 | |
| 1380 | #ifdef __BIG_ENDIAN |
| 1381 | *val = (w0 << 32) | w1; |
| 1382 | #else |
| 1383 | *val = (w1 << 32) | w0; |
| 1384 | #endif |
| 1385 | return 0; |
| 1386 | } |
| 1387 | |
| 1388 | /* handle quadword store in two halves */ |
| 1389 | int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1390 | unsigned int rs, int is_default_endian) |
| 1391 | { |
| 1392 | u64 val = 0; |
| 1393 | enum emulation_result emulated = EMULATE_DONE; |
| 1394 | |
| 1395 | vcpu->arch.io_gpr = rs; |
| 1396 | |
| 1397 | while (vcpu->arch.mmio_vmx_copy_nums) { |
| 1398 | if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1) |
| 1399 | return EMULATE_FAIL; |
| 1400 | |
| 1401 | emulated = kvmppc_handle_store(run, vcpu, val, 8, |
| 1402 | is_default_endian); |
| 1403 | if (emulated != EMULATE_DONE) |
| 1404 | break; |
| 1405 | |
| 1406 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1407 | vcpu->arch.mmio_vmx_copy_nums--; |
| 1408 | } |
| 1409 | |
| 1410 | return emulated; |
| 1411 | } |
| 1412 | |
| 1413 | static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu, |
| 1414 | struct kvm_run *run) |
| 1415 | { |
| 1416 | enum emulation_result emulated = EMULATE_FAIL; |
| 1417 | int r; |
| 1418 | |
| 1419 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1420 | |
| 1421 | if (!vcpu->mmio_is_write) { |
| 1422 | emulated = kvmppc_handle_load128_by2x64(run, vcpu, |
| 1423 | vcpu->arch.io_gpr, 1); |
| 1424 | } else { |
| 1425 | emulated = kvmppc_handle_store128_by2x64(run, vcpu, |
| 1426 | vcpu->arch.io_gpr, 1); |
| 1427 | } |
| 1428 | |
| 1429 | switch (emulated) { |
| 1430 | case EMULATE_DO_MMIO: |
| 1431 | run->exit_reason = KVM_EXIT_MMIO; |
| 1432 | r = RESUME_HOST; |
| 1433 | break; |
| 1434 | case EMULATE_FAIL: |
| 1435 | pr_info("KVM: MMIO emulation failed (VMX repeat)\n"); |
| 1436 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
| 1437 | run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; |
| 1438 | r = RESUME_HOST; |
| 1439 | break; |
| 1440 | default: |
| 1441 | r = RESUME_GUEST; |
| 1442 | break; |
| 1443 | } |
| 1444 | return r; |
| 1445 | } |
| 1446 | #endif /* CONFIG_ALTIVEC */ |
| 1447 | |
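| | /*
| |  * ONE_REG accessors.  kvmppc_get_one_reg()/kvmppc_set_one_reg()
| |  * handle most register IDs in the backend; an -EINVAL from them
| |  * falls through to the generic Altivec handling below.
| |  */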
Mihai Caraman | 8a41ea5 | 2014-08-20 16:36:24 +0300 | [diff] [blame] | 1448 | int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) |
| 1449 | { |
| 1450 | int r = 0; |
| 1451 | union kvmppc_one_reg val; |
| 1452 | int size; |
| 1453 | |
| 1454 | size = one_reg_size(reg->id); |
| 1455 | if (size > sizeof(val)) |
| 1456 | return -EINVAL; |
| 1457 | |
| 1458 | r = kvmppc_get_one_reg(vcpu, reg->id, &val); |
| 1459 | if (r == -EINVAL) { |
| 1460 | r = 0; |
| 1461 | switch (reg->id) { |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1462 | #ifdef CONFIG_ALTIVEC |
| 1463 | case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: |
| 1464 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1465 | r = -ENXIO; |
| 1466 | break; |
| 1467 | } |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1468 | val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1469 | break; |
| 1470 | case KVM_REG_PPC_VSCR: |
| 1471 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1472 | r = -ENXIO; |
| 1473 | break; |
| 1474 | } |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1475 | val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1476 | break; |
| 1477 | case KVM_REG_PPC_VRSAVE: |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1478 | val = get_reg_val(reg->id, vcpu->arch.vrsave); |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1479 | break; |
| 1480 | #endif /* CONFIG_ALTIVEC */ |
Mihai Caraman | 8a41ea5 | 2014-08-20 16:36:24 +0300 | [diff] [blame] | 1481 | default: |
| 1482 | r = -EINVAL; |
| 1483 | break; |
| 1484 | } |
| 1485 | } |
| 1486 | |
| 1487 | if (r) |
| 1488 | return r; |
| 1489 | |
| 1490 | if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size)) |
| 1491 | r = -EFAULT; |
| 1492 | |
| 1493 | return r; |
| 1494 | } |
| 1495 | |
| 1496 | int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) |
| 1497 | { |
| 1498 | int r; |
| 1499 | union kvmppc_one_reg val; |
| 1500 | int size; |
| 1501 | |
| 1502 | size = one_reg_size(reg->id); |
| 1503 | if (size > sizeof(val)) |
| 1504 | return -EINVAL; |
| 1505 | |
| 1506 | if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size)) |
| 1507 | return -EFAULT; |
| 1508 | |
| 1509 | r = kvmppc_set_one_reg(vcpu, reg->id, &val); |
| 1510 | if (r == -EINVAL) { |
| 1511 | r = 0; |
| 1512 | switch (reg->id) { |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1513 | #ifdef CONFIG_ALTIVEC |
| 1514 | case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: |
| 1515 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1516 | r = -ENXIO; |
| 1517 | break; |
| 1518 | } |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1519 | vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1520 | break; |
| 1521 | case KVM_REG_PPC_VSCR: |
| 1522 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1523 | r = -ENXIO; |
| 1524 | break; |
| 1525 | } |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1526 | vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1527 | break; |
| 1528 | case KVM_REG_PPC_VRSAVE: |
Greg Kurz | b4d7f16 | 2016-01-13 18:28:17 +0100 | [diff] [blame] | 1529 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
| 1530 | r = -ENXIO; |
| 1531 | break; |
| 1532 | } |
| 1533 | vcpu->arch.vrsave = set_reg_val(reg->id, val); |
Mihai Caraman | 3840edc | 2014-08-20 16:36:25 +0300 | [diff] [blame] | 1534 | break; |
| 1535 | #endif /* CONFIG_ALTIVEC */ |
Mihai Caraman | 8a41ea5 | 2014-08-20 16:36:24 +0300 | [diff] [blame] | 1536 | default: |
| 1537 | r = -EINVAL; |
| 1538 | break; |
| 1539 | } |
| 1540 | } |
| 1541 | |
| 1542 | return r; |
| 1543 | } |
| 1544 | |
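| | /*
| |  * Main vcpu run loop entry.  First finish whatever the previous exit
| |  * left pending (MMIO completion, OSI/PAPR hypercall results, EPR);
| |  * multi-element VSX/VMX accesses may bounce straight back to
| |  * userspace here for their next element.
| |  */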
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1545 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) |
| 1546 | { |
| 1547 | int r; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1548 | |
Christoffer Dall | accb757 | 2017-12-04 21:35:25 +0100 | [diff] [blame] | 1549 | vcpu_load(vcpu); |
| 1550 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1551 | if (vcpu->mmio_needed) { |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1552 | vcpu->mmio_needed = 0; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1553 | if (!vcpu->mmio_is_write) |
| 1554 | kvmppc_complete_mmio_load(vcpu, run); |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1555 | #ifdef CONFIG_VSX |
| 1556 | if (vcpu->arch.mmio_vsx_copy_nums > 0) { |
| 1557 | vcpu->arch.mmio_vsx_copy_nums--; |
| 1558 | vcpu->arch.mmio_vsx_offset++; |
| 1559 | } |
| 1560 | |
| 1561 | if (vcpu->arch.mmio_vsx_copy_nums > 0) { |
| 1562 | r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run); |
| 1563 | if (r == RESUME_HOST) { |
| 1564 | vcpu->mmio_needed = 1; |
Christoffer Dall | accb757 | 2017-12-04 21:35:25 +0100 | [diff] [blame] | 1565 | goto out; |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1566 | } |
| 1567 | } |
| 1568 | #endif |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1569 | #ifdef CONFIG_ALTIVEC |
| 1570 | if (vcpu->arch.mmio_vmx_copy_nums > 0) |
| 1571 | vcpu->arch.mmio_vmx_copy_nums--; |
| 1572 | |
| 1573 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { |
| 1574 | r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); |
| 1575 | if (r == RESUME_HOST) { |
| 1576 | vcpu->mmio_needed = 1; |
Radim Krčmář | 1ab03c0 | 2018-02-09 21:36:57 +0100 | [diff] [blame] | 1577 | goto out; |
Jose Ricardo Ziviani | 09f9849 | 2018-02-03 18:24:26 -0200 | [diff] [blame] | 1578 | } |
| 1579 | } |
| 1580 | #endif |
Alexander Graf | ad0a048 | 2010-03-24 21:48:30 +0100 | [diff] [blame] | 1581 | } else if (vcpu->arch.osi_needed) { |
| 1582 | u64 *gprs = run->osi.gprs; |
| 1583 | int i; |
| 1584 | |
| 1585 | for (i = 0; i < 32; i++) |
| 1586 | kvmppc_set_gpr(vcpu, i, gprs[i]); |
| 1587 | vcpu->arch.osi_needed = 0; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1588 | } else if (vcpu->arch.hcall_needed) { |
| 1589 | int i; |
| 1590 | |
| 1591 | kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); |
| 1592 | for (i = 0; i < 9; ++i) |
| 1593 | kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); |
| 1594 | vcpu->arch.hcall_needed = 0; |
Alexander Graf | 1c81063 | 2013-01-04 18:12:48 +0100 | [diff] [blame] | 1595 | #ifdef CONFIG_BOOKE |
| 1596 | } else if (vcpu->arch.epr_needed) { |
| 1597 | kvmppc_set_epr(vcpu, run->epr.epr); |
| 1598 | vcpu->arch.epr_needed = 0; |
| 1599 | #endif |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1600 | } |
| 1601 | |
Jan H. Schönherr | 20b7035 | 2017-11-24 22:39:01 +0100 | [diff] [blame] | 1602 | kvm_sigset_activate(vcpu); |
Bin Lu | 6f63e81 | 2017-02-21 21:12:36 +0800 | [diff] [blame] | 1603 | |
Paolo Bonzini | 460df4c | 2017-02-08 11:50:15 +0100 | [diff] [blame] | 1604 | if (run->immediate_exit) |
| 1605 | r = -EINTR; |
| 1606 | else |
| 1607 | r = kvmppc_vcpu_run(run, vcpu); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1608 | |
Jan H. Schönherr | 20b7035 | 2017-11-24 22:39:01 +0100 | [diff] [blame] | 1609 | kvm_sigset_deactivate(vcpu); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1610 | |
Christoffer Dall | accb757 | 2017-12-04 21:35:25 +0100 | [diff] [blame] | 1611 | out: |
| 1612 | vcpu_put(vcpu); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1613 | return r; |
| 1614 | } |
| 1615 | |
| 1616 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) |
| 1617 | { |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1618 | if (irq->irq == KVM_INTERRUPT_UNSET) { |
Paul Mackerras | 4fe27d2 | 2013-02-14 14:00:25 +0000 | [diff] [blame] | 1619 | kvmppc_core_dequeue_external(vcpu); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1620 | return 0; |
| 1621 | } |
Hollis Blanchard | 45c5eb6 | 2008-04-25 17:55:49 -0500 | [diff] [blame] | 1622 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1623 | kvmppc_core_queue_external(vcpu, irq); |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 1624 | |
Scott Wood | dfd4d47 | 2011-11-17 12:39:59 +0000 | [diff] [blame] | 1625 | kvm_vcpu_kick(vcpu); |
Hollis Blanchard | 45c5eb6 | 2008-04-25 17:55:49 -0500 | [diff] [blame] | 1626 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1627 | return 0; |
| 1628 | } |
| 1629 | |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 1630 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, |
| 1631 | struct kvm_enable_cap *cap) |
| 1632 | { |
| 1633 | int r; |
| 1634 | |
| 1635 | if (cap->flags) |
| 1636 | return -EINVAL; |
| 1637 | |
| 1638 | switch (cap->cap) { |
Alexander Graf | ad0a048 | 2010-03-24 21:48:30 +0100 | [diff] [blame] | 1639 | case KVM_CAP_PPC_OSI: |
| 1640 | r = 0; |
| 1641 | vcpu->arch.osi_enabled = true; |
| 1642 | break; |
Alexander Graf | 930b412 | 2011-08-08 17:29:42 +0200 | [diff] [blame] | 1643 | case KVM_CAP_PPC_PAPR: |
| 1644 | r = 0; |
| 1645 | vcpu->arch.papr_enabled = true; |
| 1646 | break; |
Alexander Graf | 1c81063 | 2013-01-04 18:12:48 +0100 | [diff] [blame] | 1647 | case KVM_CAP_PPC_EPR: |
| 1648 | r = 0; |
Scott Wood | 5df554ad | 2013-04-12 14:08:46 +0000 | [diff] [blame] | 1649 | if (cap->args[0]) |
| 1650 | vcpu->arch.epr_flags |= KVMPPC_EPR_USER; |
| 1651 | else |
| 1652 | vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; |
Alexander Graf | 1c81063 | 2013-01-04 18:12:48 +0100 | [diff] [blame] | 1653 | break; |
Bharat Bhushan | f61c94b | 2012-08-08 20:38:19 +0000 | [diff] [blame] | 1654 | #ifdef CONFIG_BOOKE |
| 1655 | case KVM_CAP_PPC_BOOKE_WATCHDOG: |
| 1656 | r = 0; |
| 1657 | vcpu->arch.watchdog_enabled = true; |
| 1658 | break; |
| 1659 | #endif |
Alexander Graf | bf7ca4b | 2012-02-15 23:40:00 +0000 | [diff] [blame] | 1660 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
Scott Wood | dc83b8b | 2011-08-18 15:25:21 -0500 | [diff] [blame] | 1661 | case KVM_CAP_SW_TLB: { |
| 1662 | struct kvm_config_tlb cfg; |
| 1663 | void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; |
| 1664 | |
| 1665 | r = -EFAULT; |
| 1666 | if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) |
| 1667 | break; |
| 1668 | |
| 1669 | r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); |
| 1670 | break; |
| 1671 | } |
| 1672 | #endif |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1673 | #ifdef CONFIG_KVM_MPIC |
| 1674 | case KVM_CAP_IRQ_MPIC: { |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1675 | struct fd f; |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1676 | struct kvm_device *dev; |
| 1677 | |
| 1678 | r = -EBADF; |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1679 | f = fdget(cap->args[0]); |
| 1680 | if (!f.file) |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1681 | break; |
| 1682 | |
| 1683 | r = -EPERM; |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1684 | dev = kvm_device_from_filp(f.file); |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1685 | if (dev) |
| 1686 | r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); |
| 1687 | |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1688 | fdput(f); |
Scott Wood | eb1e4f4 | 2013-04-12 14:08:47 +0000 | [diff] [blame] | 1689 | break; |
| 1690 | } |
| 1691 | #endif |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1692 | #ifdef CONFIG_KVM_XICS |
| 1693 | case KVM_CAP_IRQ_XICS: { |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1694 | struct fd f; |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1695 | struct kvm_device *dev; |
| 1696 | |
| 1697 | r = -EBADF; |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1698 | f = fdget(cap->args[0]); |
| 1699 | if (!f.file) |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1700 | break; |
| 1701 | |
| 1702 | r = -EPERM; |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1703 | dev = kvm_device_from_filp(f.file); |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1704 | if (dev) { |
| 1705 | if (xive_enabled()) |
| 1706 | r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); |
| 1707 | else |
| 1708 | r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); |
| 1709 | } |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1710 | |
Al Viro | 70abade | 2013-08-30 15:04:22 -0400 | [diff] [blame] | 1711 | fdput(f); |
Paul Mackerras | 5975a2e | 2013-04-27 00:28:37 +0000 | [diff] [blame] | 1712 | break; |
| 1713 | } |
| 1714 | #endif /* CONFIG_KVM_XICS */ |
Aravinda Prasad | 134764e | 2017-05-11 16:32:48 +0530 | [diff] [blame] | 1715 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
| 1716 | case KVM_CAP_PPC_FWNMI: |
| 1717 | r = -EINVAL; |
| 1718 | if (!is_kvmppc_hv_enabled(vcpu->kvm)) |
| 1719 | break; |
| 1720 | r = 0; |
| 1721 | vcpu->kvm->arch.fwnmi_enabled = true; |
| 1722 | break; |
| 1723 | #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 1724 | default: |
| 1725 | r = -EINVAL; |
| 1726 | break; |
| 1727 | } |
| 1728 | |
Alexander Graf | af8f38b | 2011-08-10 13:57:08 +0200 | [diff] [blame] | 1729 | if (!r) |
| 1730 | r = kvmppc_sanity_check(vcpu); |
| 1731 | |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 1732 | return r; |
| 1733 | } |
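| | /*
| |  * Userspace turns these on through the KVM_ENABLE_CAP vcpu ioctl.
| |  * A minimal sketch, assuming an already-created vcpu fd (the name
| |  * vcpu_fd is hypothetical):
| |  *
| |  * struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_OSI };
| |  * ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
| |  */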
| 1734 | |
Paul Mackerras | 34a75b0 | 2016-08-10 11:27:27 +1000 | [diff] [blame] | 1735 | bool kvm_arch_intc_initialized(struct kvm *kvm) |
| 1736 | { |
| 1737 | #ifdef CONFIG_KVM_MPIC |
| 1738 | if (kvm->arch.mpic) |
| 1739 | return true; |
| 1740 | #endif |
| 1741 | #ifdef CONFIG_KVM_XICS |
Benjamin Herrenschmidt | 5af5099 | 2017-04-05 17:54:56 +1000 | [diff] [blame] | 1742 | if (kvm->arch.xics || kvm->arch.xive) |
Paul Mackerras | 34a75b0 | 2016-08-10 11:27:27 +1000 | [diff] [blame] | 1743 | return true; |
| 1744 | #endif |
| 1745 | return false; |
| 1746 | } |
| 1747 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1748 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
| 1749 | struct kvm_mp_state *mp_state) |
| 1750 | { |
| 1751 | return -EINVAL; |
| 1752 | } |
| 1753 | |
| 1754 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
| 1755 | struct kvm_mp_state *mp_state) |
| 1756 | { |
| 1757 | return -EINVAL; |
| 1758 | } |
| 1759 | |
Paolo Bonzini | 5cb0944 | 2017-12-12 17:41:34 +0100 | [diff] [blame] | 1760 | long kvm_arch_vcpu_async_ioctl(struct file *filp, |
| 1761 | unsigned int ioctl, unsigned long arg) |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1762 | { |
| 1763 | struct kvm_vcpu *vcpu = filp->private_data; |
| 1764 | void __user *argp = (void __user *)arg; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1765 | |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1766 | if (ioctl == KVM_INTERRUPT) { |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1767 | struct kvm_interrupt irq; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1768 | if (copy_from_user(&irq, argp, sizeof(irq))) |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1769 | return -EFAULT; |
| 1770 | return kvm_vcpu_ioctl_interrupt(vcpu, &irq); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1771 | } |
Paolo Bonzini | 5cb0944 | 2017-12-12 17:41:34 +0100 | [diff] [blame] | 1772 | return -ENOIOCTLCMD; |
| 1773 | } |
| 1774 | |
| 1775 | long kvm_arch_vcpu_ioctl(struct file *filp, |
| 1776 | unsigned int ioctl, unsigned long arg) |
| 1777 | { |
| 1778 | struct kvm_vcpu *vcpu = filp->private_data; |
| 1779 | void __user *argp = (void __user *)arg; |
| 1780 | long r; |
Avi Kivity | 19483d1 | 2010-05-13 12:30:43 +0300 | [diff] [blame] | 1781 | |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1782 | vcpu_load(vcpu); |
| 1783 | |
| 1784 | switch (ioctl) { |
Alexander Graf | 71fbfd5 | 2010-03-24 21:48:29 +0100 | [diff] [blame] | 1785 | case KVM_ENABLE_CAP: |
| 1786 | { |
| 1787 | struct kvm_enable_cap cap; |
| 1788 | r = -EFAULT; |
| 1789 | if (copy_from_user(&cap, argp, sizeof(cap))) |
| 1790 | goto out; |
| 1791 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); |
| 1792 | break; |
| 1793 | } |
Scott Wood | dc83b8b | 2011-08-18 15:25:21 -0500 | [diff] [blame] | 1794 | |
Alexander Graf | e24ed81 | 2011-09-14 10:02:41 +0200 | [diff] [blame] | 1795 | case KVM_SET_ONE_REG: |
| 1796 | case KVM_GET_ONE_REG: |
| 1797 | { |
| 1798 | struct kvm_one_reg reg; |
| 1799 | r = -EFAULT; |
| 1800 | if (copy_from_user(®, argp, sizeof(reg))) |
| 1801 | goto out; |
| 1802 | if (ioctl == KVM_SET_ONE_REG) |
| 1803 | r = kvm_vcpu_ioctl_set_one_reg(vcpu, ®); |
| 1804 | else |
| 1805 | r = kvm_vcpu_ioctl_get_one_reg(vcpu, ®); |
| 1806 | break; |
| 1807 | } |
| 1808 | |
Alexander Graf | bf7ca4b | 2012-02-15 23:40:00 +0000 | [diff] [blame] | 1809 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
Scott Wood | dc83b8b | 2011-08-18 15:25:21 -0500 | [diff] [blame] | 1810 | case KVM_DIRTY_TLB: { |
| 1811 | struct kvm_dirty_tlb dirty; |
| 1812 | r = -EFAULT; |
| 1813 | if (copy_from_user(&dirty, argp, sizeof(dirty))) |
| 1814 | goto out; |
| 1815 | r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); |
| 1816 | break; |
| 1817 | } |
| 1818 | #endif |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1819 | default: |
| 1820 | r = -EINVAL; |
| 1821 | } |
| 1822 | |
| 1823 | out: |
Christoffer Dall | 9b062471 | 2017-12-04 21:35:36 +0100 | [diff] [blame] | 1824 | vcpu_put(vcpu); |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 1825 | return r; |
| 1826 | } |
| 1827 | |
Carsten Otte | 5b1c149 | 2012-01-04 10:25:23 +0100 | [diff] [blame] | 1828 | int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) |
| 1829 | { |
| 1830 | return VM_FAULT_SIGBUS; |
| 1831 | } |
| 1832 | |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 1833 | static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) |
| 1834 | { |
Stuart Yoder | 784bafa | 2012-07-03 05:48:51 +0000 | [diff] [blame] | 1835 | u32 inst_nop = 0x60000000; |
| 1836 | #ifdef CONFIG_KVM_BOOKE_HV |
| 1837 | u32 inst_sc1 = 0x44000022; |
Alexander Graf | 2743103 | 2014-04-24 13:39:16 +0200 | [diff] [blame] | 1838 | pvinfo->hcall[0] = cpu_to_be32(inst_sc1); |
| 1839 | pvinfo->hcall[1] = cpu_to_be32(inst_nop); |
| 1840 | pvinfo->hcall[2] = cpu_to_be32(inst_nop); |
| 1841 | pvinfo->hcall[3] = cpu_to_be32(inst_nop); |
Stuart Yoder | 784bafa | 2012-07-03 05:48:51 +0000 | [diff] [blame] | 1842 | #else |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 1843 | u32 inst_lis = 0x3c000000; |
| 1844 | u32 inst_ori = 0x60000000; |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 1845 | u32 inst_sc = 0x44000002; |
| 1846 | u32 inst_imm_mask = 0xffff; |
| 1847 | |
| 1848 | /* |
| 1849 | * The hypercall to get into KVM from within guest context is as |
| 1850 | * follows: |
| 1851 | * |
| 1852 | * lis r0, KVM_SC_MAGIC_R0@h
| 1853 | * ori r0, r0, KVM_SC_MAGIC_R0@l
| 1854 | * sc |
| 1855 | * nop |
| 1856 | */ |
Alexander Graf | 2743103 | 2014-04-24 13:39:16 +0200 | [diff] [blame] | 1857 | pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask)); |
| 1858 | pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask)); |
| 1859 | pvinfo->hcall[2] = cpu_to_be32(inst_sc); |
| 1860 | pvinfo->hcall[3] = cpu_to_be32(inst_nop); |
Stuart Yoder | 784bafa | 2012-07-03 05:48:51 +0000 | [diff] [blame] | 1861 | #endif |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 1862 | |
Liu Yu-B13201 | 9202e07 | 2012-07-03 05:48:52 +0000 | [diff] [blame] | 1863 | pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; |
| 1864 | |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 1865 | return 0; |
| 1866 | } |
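| | /*
| |  * Userspace typically fetches this block with the KVM_PPC_GET_PVINFO
| |  * vm ioctl and advertises the hypercall sequence to the guest (e.g.
| |  * via the device tree), so the guest knows how to trap into KVM.
| |  */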
| 1867 | |
Alexander Graf | 5efdb4b | 2013-04-17 00:37:57 +0200 | [diff] [blame] | 1868 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, |
| 1869 | bool line_status) |
| 1870 | { |
| 1871 | if (!irqchip_in_kernel(kvm)) |
| 1872 | return -ENXIO; |
| 1873 | |
| 1874 | irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, |
| 1875 | irq_event->irq, irq_event->level, |
| 1876 | line_status); |
| 1877 | return 0; |
| 1878 | } |
| 1879 | |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 1880 | |
| 1881 | static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, |
| 1882 | struct kvm_enable_cap *cap) |
| 1883 | { |
| 1884 | int r; |
| 1885 | |
| 1886 | if (cap->flags) |
| 1887 | return -EINVAL; |
| 1888 | |
| 1889 | switch (cap->cap) { |
| 1890 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER |
| 1891 | case KVM_CAP_PPC_ENABLE_HCALL: { |
| 1892 | unsigned long hcall = cap->args[0]; |
| 1893 | |
| 1894 | r = -EINVAL; |
| 1895 | if (hcall > MAX_HCALL_OPCODE || (hcall & 3) || |
| 1896 | cap->args[1] > 1) |
| 1897 | break; |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 1898 | if (!kvmppc_book3s_hcall_implemented(kvm, hcall)) |
| 1899 | break; |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 1900 | if (cap->args[1]) |
| 1901 | set_bit(hcall / 4, kvm->arch.enabled_hcalls); |
| 1902 | else |
| 1903 | clear_bit(hcall / 4, kvm->arch.enabled_hcalls); |
| 1904 | r = 0; |
| 1905 | break; |
| 1906 | } |
Paul Mackerras | 3c31352 | 2017-02-06 13:24:41 +1100 | [diff] [blame] | 1907 | case KVM_CAP_PPC_SMT: { |
| 1908 | unsigned long mode = cap->args[0]; |
| 1909 | unsigned long flags = cap->args[1]; |
| 1910 | |
| 1911 | r = -EINVAL; |
| 1912 | if (kvm->arch.kvm_ops->set_smt_mode) |
| 1913 | r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); |
| 1914 | break; |
| 1915 | } |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 1916 | #endif |
| 1917 | default: |
| 1918 | r = -EINVAL; |
| 1919 | break; |
| 1920 | } |
| 1921 | |
| 1922 | return r; |
| 1923 | } |
| 1924 | |
Paul Mackerras | 3214d01 | 2018-01-15 16:06:47 +1100 | [diff] [blame] | 1925 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 1926 | /* |
| 1927 | * These functions check whether the underlying hardware is safe |
| 1928 | * against attacks based on observing the effects of speculatively |
| 1929 | * executed instructions, and whether it supplies instructions for |
| 1930 | * use in workarounds. The information comes from firmware, either |
| 1931 | * via the device tree on powernv platforms or from an hcall on |
| 1932 | * pseries platforms. |
| 1933 | */ |
| 1934 | #ifdef CONFIG_PPC_PSERIES |
| 1935 | static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp) |
| 1936 | { |
| 1937 | struct h_cpu_char_result c; |
| 1938 | unsigned long rc; |
| 1939 | |
| 1940 | if (!machine_is(pseries)) |
| 1941 | return -ENOTTY; |
| 1942 | |
| 1943 | rc = plpar_get_cpu_characteristics(&c); |
| 1944 | if (rc == H_SUCCESS) { |
| 1945 | cp->character = c.character; |
| 1946 | cp->behaviour = c.behaviour; |
| 1947 | cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | |
| 1948 | KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED | |
| 1949 | KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 | |
| 1950 | KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 | |
| 1951 | KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV | |
| 1952 | KVM_PPC_CPU_CHAR_BR_HINT_HONOURED | |
| 1953 | KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF | |
| 1954 | KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS; |
| 1955 | cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | |
| 1956 | KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR | |
| 1957 | KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR; |
| 1958 | } |
| 1959 | return 0; |
| 1960 | } |
| 1961 | #else |
| 1962 | static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp) |
| 1963 | { |
| 1964 | return -ENOTTY; |
| 1965 | } |
| 1966 | #endif |
| 1967 | |
| 1968 | static inline bool have_fw_feat(struct device_node *fw_features, |
| 1969 | const char *state, const char *name) |
| 1970 | { |
| 1971 | struct device_node *np; |
| 1972 | bool r = false; |
| 1973 | |
| 1974 | np = of_get_child_by_name(fw_features, name); |
| 1975 | if (np) { |
| 1976 | r = of_property_read_bool(np, state); |
| 1977 | of_node_put(np); |
| 1978 | } |
| 1979 | return r; |
| 1980 | } |
| 1981 | |
| 1982 | static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp) |
| 1983 | { |
| 1984 | struct device_node *np, *fw_features; |
| 1985 | int r; |
| 1986 | |
| 1987 | memset(cp, 0, sizeof(*cp)); |
| 1988 | r = pseries_get_cpu_char(cp); |
| 1989 | if (r != -ENOTTY) |
| 1990 | return r; |
| 1991 | |
| 1992 | np = of_find_node_by_name(NULL, "ibm,opal"); |
| 1993 | if (np) { |
| 1994 | fw_features = of_get_child_by_name(np, "fw-features"); |
| 1995 | of_node_put(np); |
| 1996 | if (!fw_features) |
| 1997 | return 0; |
| 1998 | if (have_fw_feat(fw_features, "enabled", |
| 1999 | "inst-spec-barrier-ori31,31,0")) |
| 2000 | cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31; |
| 2001 | if (have_fw_feat(fw_features, "enabled", |
| 2002 | "fw-bcctrl-serialized")) |
| 2003 | cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED; |
| 2004 | if (have_fw_feat(fw_features, "enabled", |
| 2005 | "inst-l1d-flush-ori30,30,0")) |
| 2006 | cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30; |
| 2007 | if (have_fw_feat(fw_features, "enabled", |
| 2008 | "inst-l1d-flush-trig2")) |
| 2009 | cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2; |
| 2010 | if (have_fw_feat(fw_features, "enabled", |
| 2011 | "fw-l1d-thread-split")) |
| 2012 | cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV; |
| 2013 | if (have_fw_feat(fw_features, "enabled", |
| 2014 | "fw-count-cache-disabled")) |
| 2015 | cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS; |
| 2016 | cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | |
| 2017 | KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED | |
| 2018 | KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 | |
| 2019 | KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 | |
| 2020 | KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV | |
| 2021 | KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS; |
| 2022 | |
| 2023 | if (have_fw_feat(fw_features, "enabled", |
| 2024 | "speculation-policy-favor-security")) |
| 2025 | cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY; |
| 2026 | if (!have_fw_feat(fw_features, "disabled", |
| 2027 | "needs-l1d-flush-msr-pr-0-to-1")) |
| 2028 | cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR; |
| 2029 | if (!have_fw_feat(fw_features, "disabled", |
| 2030 | "needs-spec-barrier-for-bound-checks")) |
| 2031 | cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR; |
| 2032 | cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | |
| 2033 | KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR | |
| 2034 | KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR; |
| 2035 | |
| 2036 | of_node_put(fw_features); |
| 2037 | } |
| 2038 | |
| 2039 | return 0; |
| 2040 | } |
| 2041 | #endif |
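| | /*
| |  * Userspace reads the result through the KVM_PPC_GET_CPU_CHAR vm
| |  * ioctl.  A minimal sketch, with a hypothetical vm_fd:
| |  *
| |  * struct kvm_ppc_cpu_char cc;
| |  * if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) == 0)
| |  *         check cc.character against cc.character_mask;
| |  */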
| 2042 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2043 | long kvm_arch_vm_ioctl(struct file *filp, |
| 2044 | unsigned int ioctl, unsigned long arg) |
| 2045 | { |
Scott Wood | 5df554ad | 2013-04-12 14:08:46 +0000 | [diff] [blame] | 2046 | struct kvm *kvm __maybe_unused = filp->private_data; |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2047 | void __user *argp = (void __user *)arg; |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2048 | long r; |
| 2049 | |
| 2050 | switch (ioctl) { |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2051 | case KVM_PPC_GET_PVINFO: { |
| 2052 | struct kvm_ppc_pvinfo pvinfo; |
Vasiliy Kulikov | d8cdddc | 2010-10-30 13:04:24 +0400 | [diff] [blame] | 2053 | memset(&pvinfo, 0, sizeof(pvinfo)); |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2054 | r = kvm_vm_ioctl_get_pvinfo(&pvinfo); |
| 2055 | if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) { |
| 2056 | r = -EFAULT; |
| 2057 | goto out; |
| 2058 | } |
| 2059 | |
| 2060 | break; |
| 2061 | } |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2062 | case KVM_ENABLE_CAP: |
| 2063 | { |
| 2064 | struct kvm_enable_cap cap; |
| 2065 | r = -EFAULT; |
| 2066 | if (copy_from_user(&cap, argp, sizeof(cap))) |
| 2067 | goto out; |
| 2068 | r = kvm_vm_ioctl_enable_cap(kvm, &cap); |
| 2069 | break; |
| 2070 | } |
Paul Mackerras | 76d837a | 2017-05-11 14:31:59 +1000 | [diff] [blame] | 2071 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
Alexey Kardashevskiy | 58ded42 | 2016-03-01 17:54:40 +1100 | [diff] [blame] | 2072 | case KVM_CREATE_SPAPR_TCE_64: { |
| 2073 | struct kvm_create_spapr_tce_64 create_tce_64; |
| 2074 | |
| 2075 | r = -EFAULT; |
| 2076 | if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64))) |
| 2077 | goto out; |
| 2078 | if (create_tce_64.flags) { |
| 2079 | r = -EINVAL; |
| 2080 | goto out; |
| 2081 | } |
| 2082 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); |
| 2083 | goto out; |
| 2084 | } |
David Gibson | 54738c0 | 2011-06-29 00:22:41 +0000 | [diff] [blame] | 2085 | case KVM_CREATE_SPAPR_TCE: { |
| 2086 | struct kvm_create_spapr_tce create_tce; |
Alexey Kardashevskiy | 58ded42 | 2016-03-01 17:54:40 +1100 | [diff] [blame] | 2087 | struct kvm_create_spapr_tce_64 create_tce_64; |
David Gibson | 54738c0 | 2011-06-29 00:22:41 +0000 | [diff] [blame] | 2088 | |
| 2089 | r = -EFAULT; |
| 2090 | if (copy_from_user(&create_tce, argp, sizeof(create_tce))) |
| 2091 | goto out; |
Alexey Kardashevskiy | 58ded42 | 2016-03-01 17:54:40 +1100 | [diff] [blame] | 2092 | |
| 2093 | create_tce_64.liobn = create_tce.liobn; |
| 2094 | create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K; |
| 2095 | create_tce_64.offset = 0; |
| 2096 | create_tce_64.size = create_tce.window_size >> |
| 2097 | IOMMU_PAGE_SHIFT_4K; |
| 2098 | create_tce_64.flags = 0; |
| 2099 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); |
David Gibson | 54738c0 | 2011-06-29 00:22:41 +0000 | [diff] [blame] | 2100 | goto out; |
| 2101 | } |
Paul Mackerras | 76d837a | 2017-05-11 14:31:59 +1000 | [diff] [blame] | 2102 | #endif |
| 2103 | #ifdef CONFIG_PPC_BOOK3S_64 |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 2104 | case KVM_PPC_GET_SMMU_INFO: { |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 2105 | struct kvm_ppc_smmu_info info; |
Aneesh Kumar K.V | cbbc58d | 2013-10-07 22:18:01 +0530 | [diff] [blame] | 2106 | struct kvm *kvm = filp->private_data; |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 2107 | |
| 2108 | memset(&info, 0, sizeof(info)); |
Aneesh Kumar K.V | cbbc58d | 2013-10-07 22:18:01 +0530 | [diff] [blame] | 2109 | r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 2110 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) |
| 2111 | r = -EFAULT; |
| 2112 | break; |
| 2113 | } |
Michael Ellerman | 8e591cb | 2013-04-17 20:30:00 +0000 | [diff] [blame] | 2114 | case KVM_PPC_RTAS_DEFINE_TOKEN: { |
| 2115 | struct kvm *kvm = filp->private_data; |
| 2116 | |
| 2117 | r = kvm_vm_ioctl_rtas_define_token(kvm, argp); |
| 2118 | break; |
| 2119 | } |
Paul Mackerras | c927013 | 2017-01-30 21:21:41 +1100 | [diff] [blame] | 2120 | case KVM_PPC_CONFIGURE_V3_MMU: { |
| 2121 | struct kvm *kvm = filp->private_data; |
| 2122 | struct kvm_ppc_mmuv3_cfg cfg; |
| 2123 | |
| 2124 | r = -EINVAL; |
| 2125 | if (!kvm->arch.kvm_ops->configure_mmu) |
| 2126 | goto out; |
| 2127 | r = -EFAULT; |
| 2128 | if (copy_from_user(&cfg, argp, sizeof(cfg))) |
| 2129 | goto out; |
| 2130 | r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg); |
| 2131 | break; |
| 2132 | } |
| 2133 | case KVM_PPC_GET_RMMU_INFO: { |
| 2134 | struct kvm *kvm = filp->private_data; |
| 2135 | struct kvm_ppc_rmmu_info info; |
| 2136 | |
| 2137 | r = -EINVAL; |
| 2138 | if (!kvm->arch.kvm_ops->get_rmmu_info) |
| 2139 | goto out; |
| 2140 | r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info); |
| 2141 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) |
| 2142 | r = -EFAULT; |
| 2143 | break; |
| 2144 | } |
Paul Mackerras | 3214d01 | 2018-01-15 16:06:47 +1100 | [diff] [blame] | 2145 | case KVM_PPC_GET_CPU_CHAR: { |
| 2146 | struct kvm_ppc_cpu_char cpuchar; |
| 2147 | |
| 2148 | r = kvmppc_get_cpu_char(&cpuchar); |
| 2149 | if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar))) |
| 2150 | r = -EFAULT; |
| 2151 | break; |
| 2152 | } |
Aneesh Kumar K.V | cbbc58d | 2013-10-07 22:18:01 +0530 | [diff] [blame] | 2153 | default: { |
| 2154 | struct kvm *kvm = filp->private_data; |
| 2155 | r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); |
| 2156 | } |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2157 | #else /* CONFIG_PPC_BOOK3S_64 */ |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2158 | default: |
Avi Kivity | 367e131 | 2009-08-26 14:57:07 +0300 | [diff] [blame] | 2159 | r = -ENOTTY; |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2160 | #endif |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2161 | } |
Alexander Graf | 15711e9 | 2010-07-29 14:48:08 +0200 | [diff] [blame] | 2162 | out: |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2163 | return r; |
| 2164 | } |
| 2165 | |
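| | /*
| |  * Simple global bitmap allocator for hardware LPID values, shared by
| |  * all backends; kvmppc_init_lpid() caps the usable range at what the
| |  * platform supports.
| |  */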
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2166 | static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)]; |
| 2167 | static unsigned long nr_lpids; |
| 2168 | |
| 2169 | long kvmppc_alloc_lpid(void) |
| 2170 | { |
| 2171 | long lpid; |
| 2172 | |
| 2173 | do { |
| 2174 | lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS); |
| 2175 | if (lpid >= nr_lpids) { |
| 2176 | pr_err("%s: No LPIDs free\n", __func__); |
| 2177 | return -ENOMEM; |
| 2178 | } |
| 2179 | } while (test_and_set_bit(lpid, lpid_inuse)); |
| 2180 | |
| 2181 | return lpid; |
| 2182 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2183 | EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid); |
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2184 | |
| 2185 | void kvmppc_claim_lpid(long lpid) |
| 2186 | { |
| 2187 | set_bit(lpid, lpid_inuse); |
| 2188 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2189 | EXPORT_SYMBOL_GPL(kvmppc_claim_lpid); |
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2190 | |
| 2191 | void kvmppc_free_lpid(long lpid) |
| 2192 | { |
| 2193 | clear_bit(lpid, lpid_inuse); |
| 2194 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2195 | EXPORT_SYMBOL_GPL(kvmppc_free_lpid); |
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2196 | |
| 2197 | void kvmppc_init_lpid(unsigned long nr_lpids_param) |
| 2198 | { |
| 2199 | nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); |
| 2200 | memset(lpid_inuse, 0, sizeof(lpid_inuse)); |
| 2201 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2202 | EXPORT_SYMBOL_GPL(kvmppc_init_lpid); |
Scott Wood | 043cc4d | 2011-12-20 15:34:20 +0000 | [diff] [blame] | 2203 | |
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 2204 | int kvm_arch_init(void *opaque) |
| 2205 | { |
| 2206 | return 0; |
| 2207 | } |
| 2208 | |
Paolo Bonzini | 478d6686 | 2014-08-05 11:29:07 +0200 | [diff] [blame] | 2209 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr); |