// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#define pr_fmt(fmt) "kvm-guest: " fmt

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <linux/syscore_ops.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
#include <asm/ptrace.h>
#include <asm/reboot.h>
#include <asm/svm.h>
#include <asm/e820/api.h>

DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

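/*
 * Queue @n on the sleeper hash for @token. Returns false if a dummy
 * entry left behind by an early wakeup was found, meaning the wakeup
 * was already delivered and there is nothing to wait for; returns true
 * if @n was queued and the caller should go to sleep.
 */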
static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *e;

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		raw_spin_unlock(&b->lock);
		kfree(e);
		return false;
	}

	n->token = token;
	n->cpu = smp_processor_id();
	init_swait_queue_head(&n->wq);
	hlist_add_head(&n->link, &b->list);
	raw_spin_unlock(&b->lock);
	return true;
}

/*
 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
 * @token:	Token to identify the sleep node entry
 *
 * Invoked from the async pagefault handling code or from the VM exit page
 * fault handler. In both cases RCU is watching.
 */
void kvm_async_pf_task_wait_schedule(u32 token)
{
	struct kvm_task_sleep_node n;
	DECLARE_SWAITQUEUE(wait);

	lockdep_assert_irqs_disabled();

	if (!kvm_async_pf_queue_task(token, &n))
		return;

	for (;;) {
		prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		local_irq_enable();
		schedule();
		local_irq_disable();
	}
	finish_swait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		struct kvm_task_sleep_node *n;
		struct hlist_node *p, *next;

		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			n = hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

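/*
 * Wake the task sleeping on @token, or insert a dummy entry if the
 * wakeup arrives before the corresponding page fault was queued. A
 * token of ~0 means "wake every sleeper on this CPU" and is used when
 * async PF is torn down.
 */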
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else {
		apf_task_wake_one(n);
	}
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

noinstr u32 kvm_read_and_reset_apf_flags(void)
{
	u32 flags = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		flags = __this_cpu_read(apf_reason.flags);
		__this_cpu_write(apf_reason.flags, 0);
	}

	return flags;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);

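/*
 * Handle a 'page not present' async #PF injected via the exception
 * vector. Returns true if the fault was an async page fault and has
 * been handled, false if the caller should continue with regular #PF
 * processing.
 */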
noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	u32 flags = kvm_read_and_reset_apf_flags();
	irqentry_state_t state;

	if (!flags)
		return false;

	state = irqentry_enter(regs);
	instrumentation_begin();

	/*
	 * If the host managed to inject an async #PF into an interrupt
	 * disabled region, then die hard as this is not going to end well
	 * and the host side is seriously broken.
	 */
	if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
		panic("Host injected async #PF in interrupt disabled region\n");

	if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
		if (unlikely(!(user_mode(regs))))
			panic("Host injected async #PF in kernel mode\n");
		/* Page is swapped out by the host. */
		kvm_async_pf_task_wait_schedule(token);
	} else {
		WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
	}

	instrumentation_end();
	irqentry_exit(regs, state);
	return true;
}

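/*
 * Interrupt handler for 'page ready' async PF notifications delivered
 * on HYPERVISOR_CALLBACK_VECTOR. Wakes the task waiting on the token
 * and acks the event so the host can deliver the next notification.
 */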
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	u32 token;

	ack_APIC_irq();

	inc_irq_stat(irq_hv_callback_count);

	if (__this_cpu_read(apf_reason.enabled)) {
		token = __this_cpu_read(apf_reason.token);
		kvm_async_pf_task_wake(token);
		__this_cpu_write(apf_reason.token, 0);
		wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
	}

	set_irq_regs(old_regs);
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("stealtime: cpu %d, msr %llx\n", cpu,
		(unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

static void kvm_guest_cpu_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		u64 pa;

		WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));

		pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
		pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		pr_info("setup async PF for cpu %d\n", smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;

		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	pr_info("disable async PF for cpu %d\n", smp_processor_id());
}

static void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

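/*
 * Read the steal time counter published by the host. The version field
 * acts like a seqcount: it is odd while the host is updating the
 * record, so retry until a consistent even snapshot is observed.
 */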
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory regions pointed
 * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

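/*
 * Unregister all paravirt MSRs for this CPU so the host stops writing
 * to guest memory that may soon go away. Pending async PF waiters are
 * only woken on CPU unplug (@shutdown == false); on reboot/kexec paths
 * nothing will run on this CPU anymore.
 */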
static void kvm_guest_cpu_offline(bool shutdown)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
		wrmsrl(MSR_KVM_MIGRATION_CONTROL, 0);
	kvm_pv_disable_apf();
	if (!shutdown)
		apf_task_wake_all();
	kvmclock_disable();
}

static int kvm_cpu_online(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	kvm_guest_cpu_init();
	local_irq_restore(flags);
	return 0;
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

static bool pv_tlb_flush_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

static bool pv_ipi_supported(void)
{
	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
}

static bool pv_sched_yield_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)

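/*
 * Send an IPI to all CPUs in @mask with as few KVM_HC_SEND_IPI
 * hypercalls as possible. Destination APIC IDs are accumulated in a
 * bitmap of up to KVM_IPI_CLUSTER_SIZE bits relative to the lowest ID
 * (min); an ID that does not fit in the current cluster flushes the
 * accumulated bitmap via a hypercall and starts a new cluster.
 */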
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, apic_id, icr;
	int min = 0, max = 0;
#ifdef CONFIG_X86_64
	__uint128_t ipi_bitmap = 0;
#else
	u64 ipi_bitmap = 0;
#endif
	long ret;

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	switch (vector) {
	default:
		icr = APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr = APIC_DM_NMI;
		break;
	}

	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
				  ret);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
			  ret);
	}

	local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
	const struct cpumask *local_mask;

	cpumask_copy(new_mask, mask);
	cpumask_clear_cpu(this_cpu, new_mask);
	local_mask = new_mask;
	__send_ipi_mask(local_mask, vector);
}

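/*
 * Mark the guest as migration-ready via MSR_KVM_MIGRATION_CONTROL, but
 * only if the firmware published the "SevLiveMigrationEnabled" EFI
 * variable and the platform supports KVM_FEATURE_MIGRATION_CONTROL.
 */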
static int __init setup_efi_kvm_sev_migration(void)
{
	efi_char16_t efi_sev_live_migration_enabled[] = L"SevLiveMigrationEnabled";
	efi_guid_t efi_variable_guid = AMD_SEV_MEM_ENCRYPT_GUID;
	efi_status_t status;
	unsigned long size;
	bool enabled;

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) ||
	    !kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
		return 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
		pr_info("%s : EFI runtime services are not enabled\n", __func__);
		return 0;
	}

	size = sizeof(enabled);

	/* Get variable contents into buffer */
	status = efi.get_variable(efi_sev_live_migration_enabled,
				  &efi_variable_guid, NULL, &size, &enabled);

	if (status == EFI_NOT_FOUND) {
		pr_info("%s : EFI live migration variable not found\n", __func__);
		return 0;
	}

	if (status != EFI_SUCCESS) {
		pr_info("%s : EFI variable retrieval failed\n", __func__);
		return 0;
	}

	if (enabled == 0) {
		pr_info("%s: live migration disabled in EFI\n", __func__);
		return 0;
	}

	pr_info("%s : live migration enabled in EFI\n", __func__);
	wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY);

	return 1;
}

late_initcall(setup_efi_kvm_sev_migration);

/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
	apic->send_IPI_mask = kvm_send_ipi_mask;
	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
	pr_info("setup PV IPIs\n");
}

static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
	int cpu;

	native_send_call_func_ipi(mask);

	/* Make sure other vCPUs get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (vcpu_is_preempted(cpu)) {
			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
			break;
		}
	}
}

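/*
 * Paravirt remote TLB flush: CPUs whose vCPU is currently preempted
 * are dropped from the flush mask after KVM_VCPU_FLUSH_TLB is set in
 * their steal time record; the host then flushes their TLB on the next
 * VM entry, and only the remaining CPUs are IPI'd via the native path.
 */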
static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to call flush only on online vCPUs. And
	 * queue flush_on_enter for pre-empted vCPUs
	 */
	for_each_cpu(cpu, flushmask) {
		/*
		 * The local vCPU is never preempted, so we do not explicitly
		 * skip check for local vCPU - it will never be cleared from
		 * flushmask.
		 */
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_multi(flushmask, info);
}

static __init int kvm_alloc_cpumask(void)
{
	int cpu;

	if (!kvm_para_available() || nopv)
		return 0;

	if (pv_tlb_flush_supported() || pv_ipi_supported())
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}

	return 0;
}
arch_initcall(kvm_alloc_cpumask);

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	kvm_guest_cpu_offline(false);
	local_irq_restore(flags);
	return 0;
}

#endif

static int kvm_suspend(void)
{
	kvm_guest_cpu_offline(false);

	return 0;
}

static void kvm_resume(void)
{
	kvm_cpu_online(raw_smp_processor_id());
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend	= kvm_suspend,
	.resume		= kvm_resume,
};

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	kvm_guest_cpu_offline(true);
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

/*
 * After a PV feature is registered, the host will keep writing to the
 * registered memory location. If the guest happens to shut down, this
 * memory won't be valid anymore. In cases like kexec, in which a new
 * kernel is installed, the host would keep writing to a now random
 * memory location.
 */
#ifdef CONFIG_KEXEC_CORE
static void kvm_crash_shutdown(struct pt_regs *regs)
{
	kvm_guest_cpu_offline(true);
	native_machine_crash_shutdown(regs);
}
#endif

static void __init kvm_guest_init(void)
{
	int i;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		static_call_update(pv_steal_clock, kvm_steal_clock);
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		static_branch_enable(&kvm_async_pf_enabled);
		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
	}

#ifdef CONFIG_SMP
	if (pv_tlb_flush_supported()) {
		pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
		pr_info("KVM setup pv remote TLB flush\n");
	}

	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (pv_sched_yield_supported()) {
		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
		pr_info("setup PV sched yield\n");
	}
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

#ifdef CONFIG_KEXEC_CORE
	machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif

	register_syscore_ops(&kvm_syscore_ops);

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base(KVM_SIGNATURE, 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
EXPORT_SYMBOL_GPL(kvm_arch_para_hints);

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#ifdef CONFIG_SMP
	if (pv_ipi_supported())
		kvm_setup_pv_ipi();
#endif
}

static bool __init kvm_msi_ext_dest_id(void)
{
	return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID);
}

static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc)
{
	kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages,
			   KVM_MAP_GPA_RANGE_ENC_STAT(enc) | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
}

static void __init kvm_init_platform(void)
{
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
	    kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) {
		unsigned long nr_pages;
		int i;

		pv_ops.mmu.notify_page_enc_status_changed =
			kvm_sev_hc_page_enc_status;

		/*
		 * Reset the host's shared pages list related to kernel
		 * specific page encryption status settings before we load a
		 * new kernel by kexec. Reset the page encryption status
		 * during early boot instead of just before kexec to avoid SMP
		 * races during kvm_pv_guest_cpu_reboot().
		 * NOTE: We cannot reset the complete shared pages list
		 * here as we need to retain the UEFI/OVMF firmware
		 * specific settings.
		 */

		for (i = 0; i < e820_table->nr_entries; i++) {
			struct e820_entry *entry = &e820_table->entries[i];

			if (entry->type != E820_TYPE_RAM)
				continue;

			nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE);

			kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr,
					   nr_pages,
					   KVM_MAP_GPA_RANGE_ENCRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
		}

		/*
		 * Ensure that _bss_decrypted section is marked as decrypted in the
		 * shared pages list.
		 */
		nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted,
					PAGE_SIZE);
		early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
						nr_pages, 0);

		/*
		 * If not booted using EFI, enable Live migration support.
		 */
		if (!efi_enabled(EFI_BOOT))
			wrmsrl(MSR_KVM_MIGRATION_CONTROL,
			       KVM_MIGRATION_READY);
	}
	kvmclock_init();
	x86_platform.apic_post_init = kvm_apic_init;
}

#if defined(CONFIG_AMD_MEM_ENCRYPT)
static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* RAX and CPL are already in the GHCB */
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
	ghcb_set_rsi(ghcb, regs->si);
}

static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* No checking of the return state needed */
	return true;
}
#endif

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name				= "KVM",
	.detect				= kvm_detect,
	.type				= X86_HYPER_KVM,
	.init.guest_late_init		= kvm_guest_init,
	.init.x2apic_available		= kvm_para_available,
	.init.msi_ext_dest_id		= kvm_msi_ext_dest_id,
	.init.init_platform		= kvm_init_platform,
#if defined(CONFIG_AMD_MEM_ENCRYPT)
	.runtime.sev_es_hcall_prepare	= kvm_sev_es_hcall_prepare,
	.runtime.sev_es_hcall_finish	= kvm_sev_es_hcall_finish,
#endif
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

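/*
 * Wait primitive for the paravirt qspinlock: halt the vCPU until it is
 * kicked, re-checking the lock byte with interrupts disabled so the
 * wakeup cannot be missed between the check and the halt.
 */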
static void kvm_wait(u8 *ptr, u8 val)
{
	if (in_nmi())
		return;

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occurs to save us.
	 */
	if (irqs_disabled()) {
		if (READ_ONCE(*ptr) == val)
			halt();
	} else {
		local_irq_disable();

		/* safe_halt() will enable IRQ */
		if (READ_ONCE(*ptr) == val)
			safe_halt();
		else
			local_irq_enable();
	}
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq __per_cpu_offset(,%rdi,8), %rax;"
"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne %al;"
"ret;"
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	/*
	 * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an
	 * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is
	 * preferred over native qspinlock when vCPU is preempted.
	 */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
		pr_info("PV spinlocks disabled, no host support\n");
		return;
	}

	/*
	 * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
	 * are available.
	 */
	if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
		pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n");
		goto out;
	}

	if (num_possible_cpus() == 1) {
		pr_info("PV spinlocks disabled, single CPU\n");
		goto out;
	}

	if (nopvspin) {
		pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n");
		goto out;
	}

	pr_info("PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_ops.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
	/*
	 * When PV spinlock is enabled which is preferred over
	 * virt_spin_lock(), virt_spin_lock_key's value is meaningless.
	 * Just disable it anyway.
	 */
out:
	static_branch_disable(&virt_spin_lock_key);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL

static void kvm_disable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
		pr_err_once("host does not support poll control\n");
		pr_err_once("host upgrade recommended\n");
		return;
	}

	/* Enable guest halt poll disables host halt poll */
	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		return;

	/* Disable guest halt poll enables host halt poll */
	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif