/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>
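/*
 * HPT allocations from the CMA region are made in chunks of
 * (1 << KVM_CMA_CHUNK_ORDER) bytes, i.e. 256 KiB.
 */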
#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * The XIVE module will populate these when it loads
 */
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
		       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

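/*
 * "kvm_cma_resv_ratio=N" on the kernel command line overrides the
 * default reservation ratio; e.g. kvm_cma_resv_ratio=10 reserves
 * 10% of memory instead of 5%.
 */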
static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

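/*
 * Allocate nr_pages pages for a guest hashed page table from the KVM
 * CMA region; nr_pages must cover at least one CMA chunk.
 */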
struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);
/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator has been
 * activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
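	/*
	 * For example, with 32GB of memory and the default 5% ratio,
	 * about 1.6GB is reserved here.
	 */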
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
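	/*
	 * Spin for up to ~10 microseconds (the "stop" timebase value
	 * above) waiting for all other running threads to cede or confer.
	 */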
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

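/*
 * hcall numbers are multiples of 4, so the hcall number divided by 4
 * indexes hcall_real_table; a non-zero entry means the hcall has a
 * real-mode handler.
 */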
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	int r;

	/* Only need to do the expensive mfmsr() on radix */
	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
		r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
	else
		r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
	if (r)
		return H_SUCCESS;

	return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* For a nested hypervisor, use the XICS via hcall */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
				IPI_PRIORITY);
		return;
	}

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xics_on_xive()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i, t;
	int cpu0;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map.
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}

	/*
	 * On POWER9 when running a HPT guest on a radix host (sip != NULL),
	 * we have to interrupt inactive CPU threads to get them to
	 * restore the host LPCR value.
	 */
	if (sip->lpcr_req) {
		if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
			vc = local_paca->kvm_hstate.kvm_vcore;
			cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
			for (t = 1; t < threads_per_core; ++t) {
				if (sip->napped[t])
					kvmhv_rm_send_ipi(cpu0 + t);
			}
		}
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA; it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 if it is a passthrough interrupt that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}

static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
		xirr = cpu_to_be32(retbuf[0]);
	} else {
		xics_phys = local_paca->kvm_hstate.xics_phys;
		rc = 0;
		if (!xics_phys)
			rc = opal_int_get_xirr(&xirr, false);
		else
			xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	}
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
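	/* The low 24 bits of the XIRR hold the interrupt source (XISR) */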
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (kvmhv_on_pseries()) {
			unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

			plpar_hcall_raw(H_IPI, retbuf,
					hard_smp_processor_id(), 0xff);
			plpar_hcall_raw(H_EOI, retbuf, h_xirr);
		} else if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/*
			 * We raced with the host; we need to resend that
			 * IPI, bummer.
			 */
			if (kvmhv_on_pseries()) {
				unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

				plpar_hcall_raw(H_IPI, retbuf,
						hard_smp_processor_id(),
						IPI_PRIORITY);
			} else if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}

#ifdef CONFIG_KVM_XICS
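/* Are we executing in real mode, i.e. with data relocation off? */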
static inline bool is_rm(void)
{
	return !(mfmsr() & MSR_DR);
}

unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	vcpu->arch.regs.gpr[5] = get_tb();
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipoll(vcpu, server);
		if (unlikely(!__xive_vm_h_ipoll))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipoll(vcpu, server);
	} else
		return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipi(vcpu, server, mfrr);
		if (unlikely(!__xive_vm_h_ipi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipi(vcpu, server, mfrr);
	} else
		return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_cppr(vcpu, cppr);
		if (unlikely(!__xive_vm_h_cppr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_cppr(vcpu, cppr);
	} else
		return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_eoi(vcpu, xirr);
		if (unlikely(!__xive_vm_h_eoi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_eoi(vcpu, xirr);
	} else
		return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */

void kvmppc_bad_interrupt(struct pt_regs *regs)
{
	/*
	 * A system reset (0x100) could happen at any time; a machine
	 * check (0x200) can happen due to an invalid real address
	 * access, for example (or at any time due to a hardware problem).
	 */
	if (TRAP(regs) == 0x100) {
		get_paca()->in_nmi++;
		system_reset_exception(regs);
		get_paca()->in_nmi--;
	} else if (TRAP(regs) == 0x200) {
		machine_check_exception(regs);
	} else {
		die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
	}
	panic("Bad KVM trap");
}

/*
 * Functions used to switch LPCR HR and UPRT bits on all threads
 * when entering and exiting HPT guests on a radix host.
 */

#define PHASE_REALMODE		1	/* in real mode */
#define PHASE_SET_LPCR		2	/* have set LPCR */
#define PHASE_OUT_OF_GUEST	4	/* have finished executing in guest */
#define PHASE_RESET_LPCR	8	/* have reset LPCR to host value */

#define ALL(p)		(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))
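/*
 * ALL(p) replicates a phase value into all four per-thread bytes of
 * lpcr_sync.allphases, so a single load can check whether every
 * thread of the core has reached the given phase.
 */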

static void wait_for_sync(struct kvm_split_mode *sip, int phase)
{
	int thr = local_paca->kvm_hstate.tid;

	sip->lpcr_sync.phase[thr] |= phase;
	phase = ALL(phase);
	while ((sip->lpcr_sync.allphases & phase) != phase) {
		HMT_low();
		barrier();
	}
	HMT_medium();
}

void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
{
	unsigned long rb, set;

	/* wait for every other thread to get to real mode */
	wait_for_sync(sip, PHASE_REALMODE);

	/* Set LPCR and LPIDR */
	mtspr(SPRN_LPCR, sip->lpcr_req);
	mtspr(SPRN_LPID, sip->lpidr_req);
	isync();

	/* Invalidate the TLB on thread 0 */
	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_set = 0;
		asm volatile("ptesync" : : : "memory");
		for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) {
			rb = TLBIEL_INVAL_SET_LPID +
				(set << TLBIEL_INVAL_SET_SHIFT);
			asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
				     "r" (rb), "r" (0));
		}
		asm volatile("ptesync" : : : "memory");
	}

	/* indicate that we have done so and wait for others */
	wait_for_sync(sip, PHASE_SET_LPCR);
	/* order read of sip->lpcr_sync.allphases vs. sip->do_set */
	smp_rmb();
}

/*
 * Called when a thread that has been in the guest needs
 * to reload the host LPCR value - but only on POWER9 when
 * running a HPT guest on a radix host.
 */
void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
{
	/* we're out of the guest... */
	wait_for_sync(sip, PHASE_OUT_OF_GUEST);

	mtspr(SPRN_LPID, 0);
	mtspr(SPRN_LPCR, sip->host_lpcr);
	isync();

	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_restore = 0;
		smp_wmb();	/* order store of do_restore vs. phase */
	}

	wait_for_sync(sip, PHASE_RESET_LPCR);
	smp_mb();
	local_paca->kvm_hstate.kvm_split_mode = NULL;
}

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long vec = 0;
	unsigned long lpcr;

	/* Insert EXTERNAL bit into LPCR at the MER bit position */
	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			vec = BOOK3S_INTERRUPT_EXTERNAL;
		} else {
			long int dec = mfspr(SPRN_DEC);
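			/* DEC is 32 bits unless the large decrementer (LPCR[LD]) is enabled */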
			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				vec = BOOK3S_INTERRUPT_DECREMENTER;
		}
	}
	if (vec) {
		unsigned long msr, old_msr = vcpu->arch.shregs.msr;

		kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
		kvmppc_set_srr1(vcpu, old_msr);
		kvmppc_set_pc(vcpu, vec);
		msr = vcpu->arch.intr_msr;
		if (MSR_TM_ACTIVE(old_msr))
			msr |= MSR_TS_S;
		vcpu->arch.shregs.msr = msr;
	}

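	/*
	 * If a doorbell was requested for this vcpu, set a bit in DPDES
	 * so the doorbell is delivered as soon as we enter the guest.
	 */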
	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}