Thomas Gleixner | d2912cb | 2019-06-04 10:11:33 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (C) 2011. Freescale Inc. All rights reserved. |
| 4 | * |
| 5 | * Authors: |
| 6 | * Alexander Graf <agraf@suse.de> |
| 7 | * Paul Mackerras <paulus@samba.org> |
| 8 | * |
| 9 | * Description: |
| 10 | * |
| 11 | * Hypercall handling for running PAPR guests in PR KVM on Book 3S |
| 12 | * processors. |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 13 | */ |
| 14 | |
Benjamin Herrenschmidt | f31e65e | 2012-03-15 21:58:34 +0000 | [diff] [blame] | 15 | #include <linux/anon_inodes.h> |
| 16 | |
Linus Torvalds | 7c0f6ba | 2016-12-24 11:46:01 -0800 | [diff] [blame] | 17 | #include <linux/uaccess.h> |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 18 | #include <asm/kvm_ppc.h> |
| 19 | #include <asm/kvm_book3s.h> |
| 20 | |
Paul Mackerras | 5cd92a9 | 2013-09-20 14:52:47 +1000 | [diff] [blame] | 21 | #define HPTE_SIZE 16 /* bytes per HPT entry */ |
| 22 | |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 23 | static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index) |
| 24 | { |
| 25 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); |
| 26 | unsigned long pteg_addr; |
| 27 | |
| 28 | pte_index <<= 4; |
| 29 | pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70; |
| 30 | pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL; |
| 31 | pteg_addr |= pte_index; |
| 32 | |
| 33 | return pteg_addr; |
| 34 | } |
| 35 | |
| 36 | static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) |
| 37 | { |
| 38 | long flags = kvmppc_get_gpr(vcpu, 4); |
| 39 | long pte_index = kvmppc_get_gpr(vcpu, 5); |
Alexander Graf | f396df3 | 2014-06-16 13:58:11 +0200 | [diff] [blame] | 40 | __be64 pteg[2 * 8]; |
| 41 | __be64 *hpte; |
| 42 | unsigned long pteg_addr, i; |
Paul Mackerras | 5cd92a9 | 2013-09-20 14:52:47 +1000 | [diff] [blame] | 43 | long int ret; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 44 | |
Paul Mackerras | 5cd92a9 | 2013-09-20 14:52:47 +1000 | [diff] [blame] | 45 | i = pte_index & 7; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 46 | pte_index &= ~7UL; |
| 47 | pteg_addr = get_pteg_addr(vcpu, pte_index); |
| 48 | |
Paul Mackerras | 9308ab8 | 2013-09-20 14:52:48 +1000 | [diff] [blame] | 49 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); |
Paul Mackerras | 67325e9 | 2017-05-11 11:33:30 +1000 | [diff] [blame] | 50 | ret = H_FUNCTION; |
| 51 | if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg))) |
| 52 | goto done; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 53 | hpte = pteg; |
| 54 | |
Paul Mackerras | 5cd92a9 | 2013-09-20 14:52:47 +1000 | [diff] [blame] | 55 | ret = H_PTEG_FULL; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 56 | if (likely((flags & H_EXACT) == 0)) { |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 57 | for (i = 0; ; ++i) { |
| 58 | if (i == 8) |
Paul Mackerras | 5cd92a9 | 2013-09-20 14:52:47 +1000 | [diff] [blame] | 59 | goto done; |
Alexander Graf | 1692aa3 | 2014-04-24 13:09:15 +0200 | [diff] [blame] | 60 | if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0) |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 61 | break; |
| 62 | hpte += 2; |
| 63 | } |
| 64 | } else { |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 65 | hpte += i * 2; |
Paul Mackerras | 5cd92a9 | 2013-09-20 14:52:47 +1000 | [diff] [blame] | 66 | if (*hpte & HPTE_V_VALID) |
| 67 | goto done; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 68 | } |
| 69 | |
Alexander Graf | 1692aa3 | 2014-04-24 13:09:15 +0200 | [diff] [blame] | 70 | hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6)); |
| 71 | hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7)); |
Paul Mackerras | 5cd92a9 | 2013-09-20 14:52:47 +1000 | [diff] [blame] | 72 | pteg_addr += i * HPTE_SIZE; |
Paul Mackerras | 67325e9 | 2017-05-11 11:33:30 +1000 | [diff] [blame] | 73 | ret = H_FUNCTION; |
| 74 | if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE)) |
| 75 | goto done; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 76 | kvmppc_set_gpr(vcpu, 4, pte_index | i); |
Paul Mackerras | 5cd92a9 | 2013-09-20 14:52:47 +1000 | [diff] [blame] | 77 | ret = H_SUCCESS; |
| 78 | |
| 79 | done: |
Paul Mackerras | 9308ab8 | 2013-09-20 14:52:48 +1000 | [diff] [blame] | 80 | mutex_unlock(&vcpu->kvm->arch.hpt_mutex); |
Paul Mackerras | 5cd92a9 | 2013-09-20 14:52:47 +1000 | [diff] [blame] | 81 | kvmppc_set_gpr(vcpu, 3, ret); |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 82 | |
| 83 | return EMULATE_DONE; |
| 84 | } |
| 85 | |
| 86 | static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) |
| 87 | { |
| 88 | unsigned long flags= kvmppc_get_gpr(vcpu, 4); |
| 89 | unsigned long pte_index = kvmppc_get_gpr(vcpu, 5); |
| 90 | unsigned long avpn = kvmppc_get_gpr(vcpu, 6); |
| 91 | unsigned long v = 0, pteg, rb; |
| 92 | unsigned long pte[2]; |
Paul Mackerras | 9308ab8 | 2013-09-20 14:52:48 +1000 | [diff] [blame] | 93 | long int ret; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 94 | |
| 95 | pteg = get_pteg_addr(vcpu, pte_index); |
Paul Mackerras | 9308ab8 | 2013-09-20 14:52:48 +1000 | [diff] [blame] | 96 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); |
Paul Mackerras | 67325e9 | 2017-05-11 11:33:30 +1000 | [diff] [blame] | 97 | ret = H_FUNCTION; |
| 98 | if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) |
| 99 | goto done; |
Alexander Graf | f396df3 | 2014-06-16 13:58:11 +0200 | [diff] [blame] | 100 | pte[0] = be64_to_cpu((__force __be64)pte[0]); |
| 101 | pte[1] = be64_to_cpu((__force __be64)pte[1]); |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 102 | |
Paul Mackerras | 9308ab8 | 2013-09-20 14:52:48 +1000 | [diff] [blame] | 103 | ret = H_NOT_FOUND; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 104 | if ((pte[0] & HPTE_V_VALID) == 0 || |
| 105 | ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) || |
Paul Mackerras | 9308ab8 | 2013-09-20 14:52:48 +1000 | [diff] [blame] | 106 | ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) |
| 107 | goto done; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 108 | |
Paul Mackerras | 67325e9 | 2017-05-11 11:33:30 +1000 | [diff] [blame] | 109 | ret = H_FUNCTION; |
| 110 | if (copy_to_user((void __user *)pteg, &v, sizeof(v))) |
| 111 | goto done; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 112 | |
| 113 | rb = compute_tlbie_rb(pte[0], pte[1], pte_index); |
| 114 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); |
| 115 | |
Paul Mackerras | 9308ab8 | 2013-09-20 14:52:48 +1000 | [diff] [blame] | 116 | ret = H_SUCCESS; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 117 | kvmppc_set_gpr(vcpu, 4, pte[0]); |
| 118 | kvmppc_set_gpr(vcpu, 5, pte[1]); |
| 119 | |
Paul Mackerras | 9308ab8 | 2013-09-20 14:52:48 +1000 | [diff] [blame] | 120 | done: |
| 121 | mutex_unlock(&vcpu->kvm->arch.hpt_mutex); |
| 122 | kvmppc_set_gpr(vcpu, 3, ret); |
| 123 | |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 124 | return EMULATE_DONE; |
| 125 | } |
| 126 | |
Matt Evans | 3aaefef | 2012-01-30 20:25:31 +0000 | [diff] [blame] | 127 | /* Request defs for kvmppc_h_pr_bulk_remove() */ |
| 128 | #define H_BULK_REMOVE_TYPE 0xc000000000000000ULL |
| 129 | #define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL |
| 130 | #define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL |
| 131 | #define H_BULK_REMOVE_END 0xc000000000000000ULL |
| 132 | #define H_BULK_REMOVE_CODE 0x3000000000000000ULL |
| 133 | #define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL |
| 134 | #define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL |
| 135 | #define H_BULK_REMOVE_PARM 0x2000000000000000ULL |
| 136 | #define H_BULK_REMOVE_HW 0x3000000000000000ULL |
| 137 | #define H_BULK_REMOVE_RC 0x0c00000000000000ULL |
| 138 | #define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL |
| 139 | #define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL |
| 140 | #define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL |
| 141 | #define H_BULK_REMOVE_AVPN 0x0200000000000000ULL |
| 142 | #define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL |
| 143 | #define H_BULK_REMOVE_MAX_BATCH 4 |
| 144 | |
/*
 * H_BULK_REMOVE: process up to H_BULK_REMOVE_MAX_BATCH remove requests,
 * passed as (tsh, tsl) register pairs starting at r4/r5.
 *
 * Each tsh encodes a request type, flags and PTE index; tsl carries the
 * AVPN to match.  Per-request status is written back into the tsh
 * register; the overall hcall status is returned to the guest in r3.
 */
static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
{
	int i;
	int paramnr = 4;	/* first (tsh, tsl) pair lives in r4/r5 */
	int ret = H_SUCCESS;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
		unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
		unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
		unsigned long pteg, rb, flags;
		unsigned long pte[2];
		unsigned long v = 0;

		/* An END entry terminates the batch early */
		if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
			break; /* Exit success */
		} else if ((tsh & H_BULK_REMOVE_TYPE) !=
			   H_BULK_REMOVE_REQUEST) {
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		/* Keep index+flags, mark the slot as a response */
		tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
		tsh |= H_BULK_REMOVE_RESPONSE;

		/* ANDCOND and AVPN matching are mutually exclusive */
		if ((tsh & H_BULK_REMOVE_ANDCOND) &&
		    (tsh & H_BULK_REMOVE_AVPN)) {
			tsh |= H_BULK_REMOVE_PARM;
			kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
		if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
			ret = H_FUNCTION;
			break;
		}
		/* HPT entries are stored big-endian */
		pte[0] = be64_to_cpu((__force __be64)pte[0]);
		pte[1] = be64_to_cpu((__force __be64)pte[1]);

		/* tsl = AVPN */
		flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;

		/* Same match conditions as kvmppc_h_pr_remove() */
		if ((pte[0] & HPTE_V_VALID) == 0 ||
		    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
		    ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
			tsh |= H_BULK_REMOVE_NOT_FOUND;
		} else {
			/* Splat the pteg in (userland) hpt */
			if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
				ret = H_FUNCTION;
				break;
			}

			rb = compute_tlbie_rb(pte[0], pte[1],
					      tsh & H_BULK_REMOVE_PTEX);
			vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
			tsh |= H_BULK_REMOVE_SUCCESS;
			/* Return the old C and R bits in the RC field */
			tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
		}
		kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
	}
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}
| 213 | |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 214 | static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) |
| 215 | { |
| 216 | unsigned long flags = kvmppc_get_gpr(vcpu, 4); |
| 217 | unsigned long pte_index = kvmppc_get_gpr(vcpu, 5); |
| 218 | unsigned long avpn = kvmppc_get_gpr(vcpu, 6); |
| 219 | unsigned long rb, pteg, r, v; |
| 220 | unsigned long pte[2]; |
Paul Mackerras | 9308ab8 | 2013-09-20 14:52:48 +1000 | [diff] [blame] | 221 | long int ret; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 222 | |
| 223 | pteg = get_pteg_addr(vcpu, pte_index); |
Paul Mackerras | 9308ab8 | 2013-09-20 14:52:48 +1000 | [diff] [blame] | 224 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); |
Paul Mackerras | 67325e9 | 2017-05-11 11:33:30 +1000 | [diff] [blame] | 225 | ret = H_FUNCTION; |
| 226 | if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) |
| 227 | goto done; |
Alexander Graf | f396df3 | 2014-06-16 13:58:11 +0200 | [diff] [blame] | 228 | pte[0] = be64_to_cpu((__force __be64)pte[0]); |
| 229 | pte[1] = be64_to_cpu((__force __be64)pte[1]); |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 230 | |
Paul Mackerras | 9308ab8 | 2013-09-20 14:52:48 +1000 | [diff] [blame] | 231 | ret = H_NOT_FOUND; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 232 | if ((pte[0] & HPTE_V_VALID) == 0 || |
Paul Mackerras | 9308ab8 | 2013-09-20 14:52:48 +1000 | [diff] [blame] | 233 | ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) |
| 234 | goto done; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 235 | |
| 236 | v = pte[0]; |
| 237 | r = pte[1]; |
| 238 | r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI | |
| 239 | HPTE_R_KEY_LO); |
| 240 | r |= (flags << 55) & HPTE_R_PP0; |
| 241 | r |= (flags << 48) & HPTE_R_KEY_HI; |
| 242 | r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO); |
| 243 | |
| 244 | pte[1] = r; |
| 245 | |
| 246 | rb = compute_tlbie_rb(v, r, pte_index); |
| 247 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); |
Alexander Graf | f396df3 | 2014-06-16 13:58:11 +0200 | [diff] [blame] | 248 | pte[0] = (__force u64)cpu_to_be64(pte[0]); |
| 249 | pte[1] = (__force u64)cpu_to_be64(pte[1]); |
Paul Mackerras | 67325e9 | 2017-05-11 11:33:30 +1000 | [diff] [blame] | 250 | ret = H_FUNCTION; |
| 251 | if (copy_to_user((void __user *)pteg, pte, sizeof(pte))) |
| 252 | goto done; |
Paul Mackerras | 9308ab8 | 2013-09-20 14:52:48 +1000 | [diff] [blame] | 253 | ret = H_SUCCESS; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 254 | |
Paul Mackerras | 9308ab8 | 2013-09-20 14:52:48 +1000 | [diff] [blame] | 255 | done: |
| 256 | mutex_unlock(&vcpu->kvm->arch.hpt_mutex); |
| 257 | kvmppc_set_gpr(vcpu, 3, ret); |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 258 | |
| 259 | return EMULATE_DONE; |
| 260 | } |
| 261 | |
David Gibson | 99342cf8 | 2015-02-05 11:53:25 +1100 | [diff] [blame] | 262 | static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu) |
| 263 | { |
| 264 | long rc; |
| 265 | |
| 266 | rc = kvmppc_h_logical_ci_load(vcpu); |
| 267 | if (rc == H_TOO_HARD) |
| 268 | return EMULATE_FAIL; |
| 269 | kvmppc_set_gpr(vcpu, 3, rc); |
| 270 | return EMULATE_DONE; |
| 271 | } |
| 272 | |
| 273 | static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu) |
| 274 | { |
| 275 | long rc; |
| 276 | |
| 277 | rc = kvmppc_h_logical_ci_store(vcpu); |
| 278 | if (rc == H_TOO_HARD) |
| 279 | return EMULATE_FAIL; |
| 280 | kvmppc_set_gpr(vcpu, 3, rc); |
| 281 | return EMULATE_DONE; |
| 282 | } |
| 283 | |
Paul Mackerras | 76d837a | 2017-05-11 14:31:59 +1000 | [diff] [blame] | 284 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
| 285 | static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu) |
| 286 | { |
| 287 | unsigned long liobn = kvmppc_get_gpr(vcpu, 4); |
| 288 | unsigned long ioba = kvmppc_get_gpr(vcpu, 5); |
| 289 | unsigned long tce = kvmppc_get_gpr(vcpu, 6); |
| 290 | long rc; |
| 291 | |
| 292 | rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce); |
| 293 | if (rc == H_TOO_HARD) |
| 294 | return EMULATE_FAIL; |
| 295 | kvmppc_set_gpr(vcpu, 3, rc); |
| 296 | return EMULATE_DONE; |
| 297 | } |
| 298 | |
Alexey Kardashevskiy | d3695aa | 2016-02-15 12:55:09 +1100 | [diff] [blame] | 299 | static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu) |
| 300 | { |
| 301 | unsigned long liobn = kvmppc_get_gpr(vcpu, 4); |
| 302 | unsigned long ioba = kvmppc_get_gpr(vcpu, 5); |
| 303 | unsigned long tce = kvmppc_get_gpr(vcpu, 6); |
| 304 | unsigned long npages = kvmppc_get_gpr(vcpu, 7); |
| 305 | long rc; |
| 306 | |
| 307 | rc = kvmppc_h_put_tce_indirect(vcpu, liobn, ioba, |
| 308 | tce, npages); |
| 309 | if (rc == H_TOO_HARD) |
| 310 | return EMULATE_FAIL; |
| 311 | kvmppc_set_gpr(vcpu, 3, rc); |
| 312 | return EMULATE_DONE; |
| 313 | } |
| 314 | |
| 315 | static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu) |
| 316 | { |
| 317 | unsigned long liobn = kvmppc_get_gpr(vcpu, 4); |
| 318 | unsigned long ioba = kvmppc_get_gpr(vcpu, 5); |
| 319 | unsigned long tce_value = kvmppc_get_gpr(vcpu, 6); |
| 320 | unsigned long npages = kvmppc_get_gpr(vcpu, 7); |
| 321 | long rc; |
| 322 | |
| 323 | rc = kvmppc_h_stuff_tce(vcpu, liobn, ioba, tce_value, npages); |
| 324 | if (rc == H_TOO_HARD) |
| 325 | return EMULATE_FAIL; |
| 326 | kvmppc_set_gpr(vcpu, 3, rc); |
| 327 | return EMULATE_DONE; |
| 328 | } |
| 329 | |
Paul Mackerras | 76d837a | 2017-05-11 14:31:59 +1000 | [diff] [blame] | 330 | #else /* CONFIG_SPAPR_TCE_IOMMU */ |
/* Without CONFIG_SPAPR_TCE_IOMMU, TCE hcalls are always left to userspace */
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
	return EMULATE_FAIL;
}

static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
{
	return EMULATE_FAIL;
}

static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
{
	return EMULATE_FAIL;
}
| 345 | #endif /* CONFIG_SPAPR_TCE_IOMMU */ |
| 346 | |
Benjamin Herrenschmidt | bc5ad3f | 2013-04-17 20:30:26 +0000 | [diff] [blame] | 347 | static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd) |
| 348 | { |
| 349 | long rc = kvmppc_xics_hcall(vcpu, cmd); |
| 350 | kvmppc_set_gpr(vcpu, 3, rc); |
| 351 | return EMULATE_DONE; |
| 352 | } |
| 353 | |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 354 | int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) |
| 355 | { |
Paul Mackerras | ef1af2e | 2014-07-19 17:59:35 +1000 | [diff] [blame] | 356 | int rc, idx; |
| 357 | |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 358 | if (cmd <= MAX_HCALL_OPCODE && |
| 359 | !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls)) |
| 360 | return EMULATE_FAIL; |
| 361 | |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 362 | switch (cmd) { |
| 363 | case H_ENTER: |
| 364 | return kvmppc_h_pr_enter(vcpu); |
| 365 | case H_REMOVE: |
| 366 | return kvmppc_h_pr_remove(vcpu); |
| 367 | case H_PROTECT: |
| 368 | return kvmppc_h_pr_protect(vcpu); |
| 369 | case H_BULK_REMOVE: |
Matt Evans | 3aaefef | 2012-01-30 20:25:31 +0000 | [diff] [blame] | 370 | return kvmppc_h_pr_bulk_remove(vcpu); |
Benjamin Herrenschmidt | f31e65e | 2012-03-15 21:58:34 +0000 | [diff] [blame] | 371 | case H_PUT_TCE: |
| 372 | return kvmppc_h_pr_put_tce(vcpu); |
Alexey Kardashevskiy | d3695aa | 2016-02-15 12:55:09 +1100 | [diff] [blame] | 373 | case H_PUT_TCE_INDIRECT: |
| 374 | return kvmppc_h_pr_put_tce_indirect(vcpu); |
| 375 | case H_STUFF_TCE: |
| 376 | return kvmppc_h_pr_stuff_tce(vcpu); |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 377 | case H_CEDE: |
Alexander Graf | 5deb8e7 | 2014-04-24 13:46:24 +0200 | [diff] [blame] | 378 | kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE); |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 379 | kvm_vcpu_block(vcpu); |
Radim Krčmář | 72875d8 | 2017-04-26 22:32:19 +0200 | [diff] [blame] | 380 | kvm_clear_request(KVM_REQ_UNHALT, vcpu); |
Jing Zhang | 0193cc9 | 2021-06-18 22:27:03 +0000 | [diff] [blame] | 381 | vcpu->stat.generic.halt_wakeup++; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 382 | return EMULATE_DONE; |
David Gibson | 99342cf8 | 2015-02-05 11:53:25 +1100 | [diff] [blame] | 383 | case H_LOGICAL_CI_LOAD: |
| 384 | return kvmppc_h_pr_logical_ci_load(vcpu); |
| 385 | case H_LOGICAL_CI_STORE: |
| 386 | return kvmppc_h_pr_logical_ci_store(vcpu); |
Benjamin Herrenschmidt | bc5ad3f | 2013-04-17 20:30:26 +0000 | [diff] [blame] | 387 | case H_XIRR: |
| 388 | case H_CPPR: |
| 389 | case H_EOI: |
| 390 | case H_IPI: |
Paul Mackerras | 8e44ddc | 2013-05-23 15:42:21 +0000 | [diff] [blame] | 391 | case H_IPOLL: |
| 392 | case H_XIRR_X: |
Benjamin Herrenschmidt | bc5ad3f | 2013-04-17 20:30:26 +0000 | [diff] [blame] | 393 | if (kvmppc_xics_enabled(vcpu)) |
| 394 | return kvmppc_h_pr_xics_hcall(vcpu, cmd); |
| 395 | break; |
Michael Ellerman | 8e591cb | 2013-04-17 20:30:00 +0000 | [diff] [blame] | 396 | case H_RTAS: |
| 397 | if (list_empty(&vcpu->kvm->arch.rtas_tokens)) |
Paul Mackerras | ef1af2e | 2014-07-19 17:59:35 +1000 | [diff] [blame] | 398 | break; |
| 399 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 400 | rc = kvmppc_rtas_hcall(vcpu); |
| 401 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 402 | if (rc) |
Michael Ellerman | 8e591cb | 2013-04-17 20:30:00 +0000 | [diff] [blame] | 403 | break; |
| 404 | kvmppc_set_gpr(vcpu, 3, 0); |
| 405 | return EMULATE_DONE; |
Alexander Graf | 0254f074 | 2011-08-08 17:21:15 +0200 | [diff] [blame] | 406 | } |
| 407 | |
| 408 | return EMULATE_FAIL; |
| 409 | } |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 410 | |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 411 | int kvmppc_hcall_impl_pr(unsigned long cmd) |
| 412 | { |
| 413 | switch (cmd) { |
| 414 | case H_ENTER: |
| 415 | case H_REMOVE: |
| 416 | case H_PROTECT: |
| 417 | case H_BULK_REMOVE: |
| 418 | case H_PUT_TCE: |
Alexey Kardashevskiy | 3f2bb76 | 2017-10-11 16:01:08 +1100 | [diff] [blame] | 419 | case H_PUT_TCE_INDIRECT: |
| 420 | case H_STUFF_TCE: |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 421 | case H_CEDE: |
David Gibson | 99342cf8 | 2015-02-05 11:53:25 +1100 | [diff] [blame] | 422 | case H_LOGICAL_CI_LOAD: |
| 423 | case H_LOGICAL_CI_STORE: |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 424 | #ifdef CONFIG_KVM_XICS |
| 425 | case H_XIRR: |
| 426 | case H_CPPR: |
| 427 | case H_EOI: |
| 428 | case H_IPI: |
| 429 | case H_IPOLL: |
| 430 | case H_XIRR_X: |
| 431 | #endif |
| 432 | return 1; |
| 433 | } |
| 434 | return 0; |
| 435 | } |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 436 | |
| 437 | /* |
| 438 | * List of hcall numbers to enable by default. |
| 439 | * For compatibility with old userspace, we enable by default |
| 440 | * all hcalls that were implemented before the hcall-enabling |
| 441 | * facility was added. Note this list should not include H_RTAS. |
| 442 | */ |
static unsigned int default_hcall_list[] = {
	H_ENTER,
	H_REMOVE,
	H_PROTECT,
	H_BULK_REMOVE,
	H_PUT_TCE,
	H_CEDE,
#ifdef CONFIG_KVM_XICS
	H_XIRR,
	H_CPPR,
	H_EOI,
	H_IPI,
	H_IPOLL,
	H_XIRR_X,
#endif
	0	/* list terminator */
};
| 460 | |
| 461 | void kvmppc_pr_init_default_hcalls(struct kvm *kvm) |
| 462 | { |
| 463 | int i; |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 464 | unsigned int hcall; |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 465 | |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 466 | for (i = 0; default_hcall_list[i]; ++i) { |
| 467 | hcall = default_hcall_list[i]; |
| 468 | WARN_ON(!kvmppc_hcall_impl_pr(hcall)); |
| 469 | __set_bit(hcall / 4, kvm->arch.enabled_hcalls); |
| 470 | } |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 471 | } |