// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__
11
#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>

/*
 * Deferred static key: keeps the Xen checks in the inline helpers below
 * free of memory loads until at least one VM actually enables Xen support.
 */
extern struct static_key_false_deferred kvm_xen_enabled;

/* Slow path behind kvm_xen_has_interrupt(); only reached when Xen is live. */
int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
/* Per-vCPU attribute get/set (presumably the KVM_XEN_VCPU_*_ATTR ioctls). */
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
/* Per-VM attribute get/set (presumably the KVM_XEN_HVM_*_ATTR ioctls). */
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
/* Handle a guest write of the hypercall page MSR configured by userspace. */
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
/* Apply userspace's KVM_XEN_HVM_CONFIG (MSR number, flags, blob). */
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);

/* Xen event-channel delivery via the IRQ routing table. */
int kvm_xen_set_evtchn_fast(struct kvm_kernel_irq_routing_entry *e,
			    struct kvm *kvm);
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue);
32
David Woodhouse30b5c852021-03-01 12:53:09 +000033static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
34{
35 return static_branch_unlikely(&kvm_xen_enabled.key) &&
36 kvm->arch.xen_hvm_config.msr;
37}
38
Joao Martins23200b72018-06-13 09:55:44 -040039static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
40{
David Woodhouse7d6bbeb2021-02-02 15:48:05 +000041 return static_branch_unlikely(&kvm_xen_enabled.key) &&
42 (kvm->arch.xen_hvm_config.flags &
43 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
Joao Martins23200b72018-06-13 09:55:44 -040044}
45
David Woodhouse40da8cc2020-12-09 20:08:30 +000046static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
47{
48 if (static_branch_unlikely(&kvm_xen_enabled.key) &&
49 vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector)
50 return __kvm_xen_has_interrupt(vcpu);
51
52 return 0;
53}
Paolo Bonzinib59b1532021-02-26 04:54:45 -050054#else
/*
 * CONFIG_KVM_XEN=n stub: always returns nonzero so the MSR write is
 * treated as unhandled (NOTE(review): confirm caller interprets 1 as
 * "not handled" and injects the appropriate fault).
 */
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	return 1;
}
59
/* CONFIG_KVM_XEN=n stub: nothing to initialize. */
static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}
63
/* CONFIG_KVM_XEN=n stub: nothing to tear down. */
static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}
67
/* CONFIG_KVM_XEN=n stub: the Xen hypercall MSR can never be enabled. */
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}
72
/* CONFIG_KVM_XEN=n stub: hypercall interception can never be enabled. */
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}
77
/* CONFIG_KVM_XEN=n stub: no Xen upcall can ever be pending. */
static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif /* CONFIG_KVM_XEN */

/* Entry point for an intercepted guest hypercall (built in both configs). */
int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

/*
 * Record a runstate transition (RUNSTATE_* from xen/interface/vcpu.h)
 * in the guest-visible runstate area.
 */
void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);

/* Mark the vCPU as actively running in its guest-visible runstate. */
static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}
96
/* Mark the vCPU runnable-but-not-running, but only if truly preempted. */
static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
	/*
	 * If the vCPU wasn't preempted but took a normal exit for
	 * some reason (hypercalls, I/O, etc.), that is accounted as
	 * still RUNSTATE_running, as the VMM is still operating on
	 * behalf of the vCPU. Only if the VMM does actually block
	 * does it need to enter RUNSTATE_blocked.
	 */
	if (vcpu->preempted)
		kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
}
109
/* 32-bit compatibility definitions, also used natively in 32-bit build */

/* 32-bit guest view of Xen's struct arch_vcpu_info. */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};
115
/*
 * 32-bit guest view of Xen's struct vcpu_info. Fixed-width fields keep
 * the layout identical to what a 32-bit guest expects.
 */
struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	uint16_t pad;
	uint32_t evtchn_pending_sel;
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */
124
/* 32-bit guest view of Xen's struct arch_shared_info. */
struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};
134
/*
 * 32-bit guest view of Xen's struct shared_info (MAX_VIRT_CPUS comes
 * from the Xen interface headers included above).
 */
struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];
	uint32_t evtchn_mask[32];
	struct pvclock_wall_clock wc;
	struct compat_arch_shared_info arch;
};
142
/*
 * Number of event channels in the 2-level ABI for a 32-bit guest:
 * one bit per channel in evtchn_pending (32 * 32 bits = 1024).
 */
#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 * \
				      sizeof_field(struct compat_shared_info, \
						   evtchn_pending))
/*
 * 32-bit guest view of Xen's struct vcpu_runstate_info. Packed because
 * 32-bit x86 aligns uint64_t on 4 bytes, so the default 64-bit layout
 * would insert padding after 'state'. time[] holds cumulative time per
 * RUNSTATE_* (see xen/interface/vcpu.h).
 */
struct compat_vcpu_runstate_info {
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed));
151
Joao Martins23200b72018-06-13 09:55:44 -0400152#endif /* __ARCH_X86_KVM_XEN_H__ */