/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H


#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by KVM;
 * the remaining bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID     (1UL << 16)

/*
 * Bit 63 of the memslot generation number is an "update in-progress flag",
 * e.g. is temporarily set for the duration of install_new_memslots().
 * This flag effectively creates a unique generation number that is used to
 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
 * i.e. may (or may not) have come from the previous memslots generation.
 *
 * This is necessary because the actual memslots update is not atomic with
 * respect to the generation number update.  Updating the generation number
 * first would allow a vCPU to cache a spte from the old memslots using the
 * new generation number, and updating the generation number after switching
 * to the new memslots would allow cache hits using the old generation number
 * to reference the defunct memslots.
 *
 * This mechanism is used to prevent getting hits in KVM's caches while a
 * memslot update is in-progress, and to prevent cache hits *after* updating
 * the actual generation number against accesses that were inserted into the
 * cache *before* the memslots were updated.
 */
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS      BIT_ULL(63)
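
/*
 * Illustrative sketch (not part of this header; the helper name is
 * hypothetical): a cache that tags its entries with the memslot
 * generation can reject stale hits by treating any generation with the
 * in-progress flag set as unusable.
 *
 *      static bool example_mmio_cache_stale(struct kvm *kvm, u64 cached_gen)
 *      {
 *              u64 gen = kvm_memslots(kvm)->generation;
 *
 *              if (gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS)
 *                      return true;    // update in flight, assume stale
 *              return cached_gen != gen;
 *      }
 */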

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS  2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM   1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bits 62:52 to indicate an error pfn and
 * bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK        (0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
#define KVM_PFN_NOSLOT          (0x1ULL << 63)

#define KVM_PFN_ERR_FAULT       (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON    (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT    (KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the host
 * failed to translate it to a pfn.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn can not be translated to
 * a pfn - either it is not in any slot or the host failed to
 * translate it.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
        return pfn == KVM_PFN_NOSLOT;
}
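
/*
 * Usage sketch (hypothetical caller): the three predicates above
 * distinguish the possible outcomes of a gfn->pfn translation.
 *
 *      kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *      if (is_noslot_pfn(pfn))
 *              handle_mmio();          // gfn not backed by any memslot
 *      else if (is_error_pfn(pfn))
 *              return -EFAULT;         // in a slot, but translation failed
 *      else
 *              use_pfn(pfn);           // valid host pfn
 */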

/*
 * Architectures that define KVM_HVA_ERR_BAD as something other than
 * PAGE_OFFSET (e.g. s390) provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD         (PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD      (PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
        return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE    (ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
        return IS_ERR(page);
}

#define KVM_REQUEST_MASK        GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP   BIT(8)
#define KVM_REQUEST_WAIT        BIT(9)
/*
 * Architecture-independent vcpu->requests bit members
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH       (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD      (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_PENDING_TIMER   2
#define KVM_REQ_UNHALT          3
#define KVM_REQUEST_ARCH_BASE   8

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
        BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
        (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr)        KVM_ARCH_REQ_FLAGS(nr, 0)
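
/*
 * Example (request names are hypothetical): an architecture defines its
 * own requests on top of KVM_REQUEST_ARCH_BASE in its asm/kvm_host.h,
 * optionally OR-ing in the KVM_REQUEST_* flags:
 *
 *      #define KVM_REQ_EXAMPLE         KVM_ARCH_REQ(0)
 *      #define KVM_REQ_EXAMPLE_SYNC    \
 *              KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 */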

#define KVM_USERSPACE_IRQ_SOURCE_ID             0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID        1

extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int dev_count;
        int ioeventfd_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_VIRTIO_CCW_NOTIFY_BUS,
        KVM_FAST_MMIO_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
                            gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                               struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                         gpa_t addr);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gpa_t cr2_or_gpa;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        bool wakeup_all;
        bool notpresent_injected;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                       unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE       ((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
        /*
         * Only valid if the 'pfn' is managed by the host kernel (i.e. there
         * is a 'struct page' for it). When using the mem= kernel parameter,
         * some memory can be used as guest memory without being managed by
         * the host kernel.
         * If 'pfn' is not managed by the host kernel, this field is
         * initialized to KVM_UNMAPPED_PAGE.
         */
        struct page *page;
        void *hva;
        kvm_pfn_t pfn;
        kvm_pfn_t gfn;
};

/*
 * Used to check whether the mapping is valid. Never inspect
 * 'kvm_host_map' fields directly for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
        return !!map->hva;
}
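
/*
 * Usage sketch (hedged; a hypothetical caller with error handling
 * elided): map a guest page, access it through the host virtual
 * address, then unmap and mark the page dirty.
 *
 *      struct kvm_host_map map;
 *
 *      if (kvm_vcpu_map(vcpu, gpa, &map))
 *              return -EFAULT;
 *      if (kvm_vcpu_mapped(&map))
 *              memcpy(map.hva + offset_in_page(gpa), data, len);
 *      kvm_vcpu_unmap(vcpu, &map, true);
 */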

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id; /* id given by userspace at creation */
        int vcpu_idx; /* index in kvm->vcpus array */
        int srcu_idx;
        int mode;
        u64 requests;
        unsigned long guest_debug;

        int pre_pcpu;
        struct list_head blocked_vcpu_list;

        struct mutex mutex;
        struct kvm_run *run;

        struct rcuwait wait;
        struct pid __rcu *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;
        unsigned int halt_poll_ns;
        bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        /*
         * CPU-relax-intercept / pause-loop-exit optimization.
         * in_spin_loop: set when a vcpu takes a pause loop exit
         *  or has a cpu relax intercepted.
         * dy_eligible: indicates whether the vcpu is eligible for directed yield.
         */
        struct {
                bool in_spin_loop;
                bool dy_eligible;
        } spin_loop;
#endif
        bool preempted;
        bool ready;
        struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        /*
         * The memory barrier ensures a previous write to vcpu->requests cannot
         * be reordered with the read of vcpu->mode.  It pairs with the general
         * memory barrier following the write of vcpu->mode in VCPU RUN.
         */
        smp_mb__before_atomic();
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support overly long bitmaps;
 * this number must be chosen so that such limits are never exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        u32 flags;
        short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        unsigned long len = kvm_dirty_bitmap_bytes(memslot);

        return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}

#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
#endif

struct kvm_s390_adapter_int {
        u64 ind_addr;
        u64 summary_addr;
        u64 ind_offset;
        u32 summary_offset;
        u32 adapter_id;
};

struct kvm_hv_sint {
        u32 vcpu;
        u32 sint;
};

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level,
                   bool line_status);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct {
                        u32 address_lo;
                        u32 address_hi;
                        u32 data;
                        u32 flags;
                        u32 devid;
                } msi;
                struct kvm_s390_adapter_int adapter;
                struct kvm_hv_sint hv_sint;
        };
        struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi. Each entry contains list of irq chips
         * the gsi is connected to.
         */
        struct hlist_head map[];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
        u64 generation;
        /* The mapping table from slot id to the index in memslots[]. */
        short id_to_index[KVM_MEM_SLOTS_NUM];
        atomic_t lru_slot;
        int used_slots;
        struct kvm_memory_slot memslots[];
};

struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

        /*
         * created_vcpus is protected by kvm->lock, and is incremented
         * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
         * incremented after storing the kvm_vcpu pointer in vcpus,
         * and is accessed atomically.
         */
        atomic_t online_vcpus;
        int created_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t lock;
                struct list_head items;
                struct list_head resampler_list;
                struct mutex resampler_lock;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
        struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
        struct list_head devices;
        u64 manual_dirty_log_protect;
        struct dentry *debugfs_dentry;
        struct kvm_stat_data **debugfs_stat_data;
        struct srcu_struct srcu;
        struct srcu_struct irq_srcu;
        pid_t userspace_pid;
        unsigned int max_halt_poll_ns;
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
        pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
                             ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
        kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
                        (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...) \
        kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...) \
        kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
                              ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...) \
        kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
        return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
        return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
                                      lockdep_is_held(&kvm->slots_lock) ||
                                      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        int num_vcpus = atomic_read(&kvm->online_vcpus);
        i = array_index_nospec(i, num_vcpus);

        /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)
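
/*
 * Usage sketch: iterate over every online vcpu, e.g. to kick them all
 * (the same pattern the request-broadcast helpers below rely on).
 *
 *      int i;
 *      struct kvm_vcpu *vcpu;
 *
 *      kvm_for_each_vcpu(i, vcpu, kvm)
 *              kvm_vcpu_kick(vcpu);
 */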

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
        struct kvm_vcpu *vcpu = NULL;
        int i;

        if (id < 0)
                return NULL;
        if (id < KVM_MAX_VCPUS)
                vcpu = kvm_get_vcpu(kvm, id);
        if (vcpu && vcpu->vcpu_id == id)
                return vcpu;
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu->vcpu_id == id)
                        return vcpu;
        return NULL;
}

static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
{
        return vcpu->vcpu_idx;
}

#define kvm_for_each_memslot(memslot, slots)                            \
        for (memslot = &slots->memslots[0];                             \
             memslot < slots->memslots + slots->used_slots; memslot++)  \
                if (WARN_ON_ONCE(!memslot->npages)) {                   \
                } else
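
/*
 * Usage sketch (illustrative only): walk the valid memslots of the
 * default address space, e.g. to total the guest pages they back.
 *
 *      struct kvm_memslots *slots = kvm_memslots(kvm);
 *      struct kvm_memory_slot *memslot;
 *      unsigned long pages = 0;
 *
 *      kvm_for_each_memslot(memslot, slots)
 *              pages += memslot->npages;
 */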

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
        return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
                  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
        as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
        return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
                        lockdep_is_held(&kvm->slots_lock) ||
                        !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
        int as_id = kvm_arch_vcpu_memslots_id(vcpu);

        return __kvm_memslots(vcpu->kvm, as_id);
}

static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        if (index < 0)
                return NULL;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
        KVM_MR_CREATE,
        KVM_MR_DELETE,
        KVM_MR_MOVE,
        KVM_MR_FLAGS_ONLY,
};
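
/*
 * Illustrative sketch of how userspace drives these transitions via the
 * KVM_SET_USER_MEMORY_REGION ioctl (the values and the 'backing' pointer
 * are hypothetical; memory_size == 0 requests KVM_MR_DELETE):
 *
 *      struct kvm_userspace_memory_region mem = {
 *              .slot            = 0,
 *              .flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *              .guest_phys_addr = 0x100000,
 *              .memory_size     = 0x200000,
 *              .userspace_addr  = (__u64)backing,
 *      };
 *      ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */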

int kvm_set_memory_region(struct kvm *kvm,
                          const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
                            const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                const struct kvm_userspace_memory_region *mem,
                                enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new,
                                enum kvm_mr_change change);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
                            struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
                                      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
                               bool atomic, bool *async, bool write_fault,
                               bool *writable);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                 void *data, unsigned int offset,
                                 unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                  void *data, unsigned int offset,
                                  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len);
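
/*
 * Usage sketch for the gfn_to_hva cache (hedged; error handling elided):
 * initialize the cache once for a fixed gpa range, then reuse the cached
 * translation for repeated accesses.
 *
 *      struct gfn_to_hva_cache ghc;
 *      u64 val = ...;
 *
 *      if (!kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *              kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */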

#define __kvm_put_guest(kvm, gfn, offset, value, type)                  \
({                                                                      \
        unsigned long __addr = gfn_to_hva(kvm, gfn);                    \
        type __user *__uaddr = (type __user *)(__addr + offset);        \
        int __ret = -EFAULT;                                            \
                                                                        \
        if (!kvm_is_error_hva(__addr))                                  \
                __ret = put_user(value, __uaddr);                       \
        if (!__ret)                                                     \
                mark_page_dirty(kvm, gfn);                              \
        __ret;                                                          \
})

#define kvm_put_guest(kvm, gpa, value, type)                            \
({                                                                      \
        gpa_t __gpa = gpa;                                              \
        struct kvm *__kvm = kvm;                                        \
        __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT,                     \
                        offset_in_page(__gpa), (value), type);          \
})
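
/*
 * Usage sketch (hypothetical caller): store a single value into guest
 * memory; on success the page is marked dirty, on failure -EFAULT is
 * returned.
 *
 *      u32 token = 42;
 *      int r = kvm_put_guest(kvm, gpa, token, u32);
 *      if (r)
 *              return r;
 */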
| 772 | |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 773 | int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); |
| 774 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 775 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); |
Yaowei Bai | 33e9415 | 2015-11-14 11:21:06 +0800 | [diff] [blame] | 776 | bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); |
Sean Christopherson | f9b84e1 | 2020-01-08 12:24:37 -0800 | [diff] [blame] | 777 | unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 778 | void mark_page_dirty(struct kvm *kvm, gfn_t gfn); |
| 779 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 780 | struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); |
| 781 | struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 782 | kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); |
| 783 | kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 784 | int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map); |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 785 | int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, |
| 786 | struct gfn_to_pfn_cache *cache, bool atomic); |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 787 | struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn); |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 788 | void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty); |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 789 | int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, |
| 790 | struct gfn_to_pfn_cache *cache, bool dirty, bool atomic); |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 791 | unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); |
| 792 | unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); |
| 793 | int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, |
| 794 | int len); |
| 795 | int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, |
| 796 | unsigned long len); |
| 797 | int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, |
| 798 | unsigned long len); |
| 799 | int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data, |
| 800 | int offset, int len); |
| 801 | int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, |
| 802 | unsigned long len); |
| 803 | void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); |
| 804 | |
Jan H. Schönherr | 20b7035 | 2017-11-24 22:39:01 +0100 | [diff] [blame] | 805 | void kvm_sigset_activate(struct kvm_vcpu *vcpu); |
| 806 | void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); |
| 807 | |
Hollis Blanchard | 8776e51 | 2007-10-31 17:24:24 -0500 | [diff] [blame] | 808 | void kvm_vcpu_block(struct kvm_vcpu *vcpu); |
Christoffer Dall | 3217f7c | 2015-08-27 16:41:15 +0200 | [diff] [blame] | 809 | void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); |
| 810 | void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); |
Radim Krčmář | 178f02f | 2017-04-26 22:32:26 +0200 | [diff] [blame] | 811 | bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 812 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); |
Dan Carpenter | fa93384 | 2014-05-23 13:20:42 +0300 | [diff] [blame] | 813 | int kvm_vcpu_yield_to(struct kvm_vcpu *target); |
Longpeng(Mike) | 199b576 | 2017-08-08 12:05:32 +0800 | [diff] [blame] | 814 | void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); |
Xiao Guangrong | a4ee1ca | 2010-11-23 11:13:00 +0800 | [diff] [blame] | 815 | |
Avi Kivity | d9e368d | 2007-06-07 19:18:30 +0300 | [diff] [blame] | 816 | void kvm_flush_remote_tlbs(struct kvm *kvm); |
Marcelo Tosatti | 2e53d63 | 2008-02-20 14:47:24 -0500 | [diff] [blame] | 817 | void kvm_reload_remote_mmus(struct kvm *kvm); |
Vitaly Kuznetsov | 7053df4 | 2018-05-16 17:21:28 +0200 | [diff] [blame] | 818 | |
| 819 | bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, |
Suravee Suthikulpanit | 54163a3 | 2020-05-06 08:17:53 -0500 | [diff] [blame] | 820 | struct kvm_vcpu *except, |
Vitaly Kuznetsov | 7053df4 | 2018-05-16 17:21:28 +0200 | [diff] [blame] | 821 | unsigned long *vcpu_bitmap, cpumask_var_t tmp); |
Tang Chen | 445b823 | 2014-09-24 15:57:55 +0800 | [diff] [blame] | 822 | bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); |
Suravee Suthikulpanit | 54163a3 | 2020-05-06 08:17:53 -0500 | [diff] [blame] | 823 | bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, |
| 824 | struct kvm_vcpu *except); |
Nitesh Narayan Lal | 7ee30bc | 2019-11-07 07:53:43 -0500 | [diff] [blame] | 825 | bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req, |
| 826 | unsigned long *vcpu_bitmap); |
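
/*
 * Illustrative sketch, not part of the original header: broadcasting a
 * request to every vCPU, roughly what kvm_flush_remote_tlbs() does
 * internally with the generic KVM_REQ_TLB_FLUSH bit. The helper name is
 * hypothetical.
 */
static inline void example_request_tlb_flush(struct kvm *kvm)
{
	/* The bool result (whether any vCPU was kicked) is ignored here. */
	kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
}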
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 827 | |
Carsten Otte | 043405e | 2007-10-10 17:16:19 +0200 | [diff] [blame] | 828 | long kvm_arch_dev_ioctl(struct file *filp, |
| 829 | unsigned int ioctl, unsigned long arg); |
Carsten Otte | 313a3dc | 2007-10-11 19:16:52 +0200 | [diff] [blame] | 830 | long kvm_arch_vcpu_ioctl(struct file *filp, |
| 831 | unsigned int ioctl, unsigned long arg); |
Souptick Joarder | 1499fa8 | 2018-04-19 00:49:58 +0530 | [diff] [blame] | 832 | vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); |
Zhang Xiantao | 018d00d | 2007-11-15 23:07:47 +0800 | [diff] [blame] | 833 | |
Alexander Graf | 784aa3d | 2014-07-14 18:27:35 +0200 | [diff] [blame] | 834 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext); |
Zhang Xiantao | 018d00d | 2007-11-15 23:07:47 +0800 | [diff] [blame] | 835 | |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 836 | void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 837 | struct kvm_memory_slot *slot, |
| 838 | gfn_t gfn_offset, |
| 839 | unsigned long mask); |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 840 | void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot); |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 841 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 842 | #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
| 843 | void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, |
| 844 | struct kvm_memory_slot *memslot); |
| 845 | #else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ |
| 846 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); |
| 847 | int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 848 | int *is_dirty, struct kvm_memory_slot **memslot); |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 849 | #endif |
Zhang Xiantao | 5bb064d | 2007-11-18 20:29:43 +0800 | [diff] [blame] | 850 | |
Yang Zhang | aa2fbe6 | 2013-04-11 19:21:40 +0800 | [diff] [blame] | 851 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, |
| 852 | bool line_status); |
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 853 | int kvm_vm_ioctl_enable_cap(struct kvm *kvm, |
| 854 | struct kvm_enable_cap *cap); |
Carsten Otte | 1fe779f | 2007-10-29 16:08:35 +0100 | [diff] [blame] | 855 | long kvm_arch_vm_ioctl(struct file *filp, |
| 856 | unsigned int ioctl, unsigned long arg); |
Carsten Otte | 313a3dc | 2007-10-11 19:16:52 +0200 | [diff] [blame] | 857 | |
Hollis Blanchard | d075206 | 2007-10-31 17:24:25 -0500 | [diff] [blame] | 858 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); |
| 859 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); |
| 860 | |
Zhang Xiantao | 8b00679 | 2007-11-16 13:05:55 +0800 | [diff] [blame] | 861 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
| 862 | struct kvm_translation *tr); |
| 863 | |
Hollis Blanchard | b6c7a5d | 2007-11-01 14:16:10 -0500 | [diff] [blame] | 864 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); |
| 865 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); |
| 866 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
| 867 | struct kvm_sregs *sregs); |
| 868 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
| 869 | struct kvm_sregs *sregs); |
Marcelo Tosatti | 62d9f0d | 2008-04-11 13:24:45 -0300 | [diff] [blame] | 870 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
| 871 | struct kvm_mp_state *mp_state); |
| 872 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
| 873 | struct kvm_mp_state *mp_state); |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 874 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
| 875 | struct kvm_guest_debug *dbg); |
Tianjia Zhang | 1b94f6f | 2020-04-16 13:10:57 +0800 | [diff] [blame] | 876 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu); |
Hollis Blanchard | b6c7a5d | 2007-11-01 14:16:10 -0500 | [diff] [blame] | 877 | |
Zhang Xiantao | f8c16bb | 2007-11-14 20:40:21 +0800 | [diff] [blame] | 878 | int kvm_arch_init(void *opaque); |
| 879 | void kvm_arch_exit(void); |
Carsten Otte | 043405e | 2007-10-10 17:16:19 +0200 | [diff] [blame] | 880 | |
Radim Krčmář | e790d9e | 2014-08-21 18:08:05 +0200 | [diff] [blame] | 881 | void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); |
| 882 | |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 883 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); |
| 884 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); |
Sean Christopherson | 897cc38 | 2019-12-18 13:55:09 -0800 | [diff] [blame] | 885 | int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id); |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 886 | int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu); |
Dominik Dingel | 31928aa | 2014-12-04 15:47:07 +0100 | [diff] [blame] | 887 | void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); |
Hollis Blanchard | d40ccc6 | 2007-11-19 14:04:43 -0600 | [diff] [blame] | 888 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 889 | |
Paolo Bonzini | 741cbba | 2019-08-03 08:14:25 +0200 | [diff] [blame] | 890 | #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS |
Paolo Bonzini | d56f513 | 2020-06-04 15:16:52 +0200 | [diff] [blame] | 891 | void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry); |
Paolo Bonzini | 741cbba | 2019-08-03 08:14:25 +0200 | [diff] [blame] | 892 | #endif |
Luiz Capitulino | 235539b | 2016-09-07 14:47:23 -0400 | [diff] [blame] | 893 | |
Radim Krčmář | 13a34e0 | 2014-08-28 15:13:03 +0200 | [diff] [blame] | 894 | int kvm_arch_hardware_enable(void); |
| 895 | void kvm_arch_hardware_disable(void); |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 896 | int kvm_arch_hardware_setup(void *opaque); |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 897 | void kvm_arch_hardware_unsetup(void); |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 898 | int kvm_arch_check_processor_compat(void *opaque); |
Zhang Xiantao | 1d737c8 | 2007-12-14 09:35:10 +0800 | [diff] [blame] | 899 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); |
Longpeng(Mike) | 199b576 | 2017-08-08 12:05:32 +0800 | [diff] [blame] | 900 | bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 901 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); |
Wanpeng Li | 17e433b | 2019-08-05 10:03:19 +0800 | [diff] [blame] | 902 | bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); |
Paolo Bonzini | d970a32 | 2020-02-13 18:22:55 +0100 | [diff] [blame] | 903 | int kvm_arch_post_init_vm(struct kvm *kvm); |
| 904 | void kvm_arch_pre_destroy_vm(struct kvm *kvm); |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 905 | |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 906 | #ifndef __KVM_HAVE_ARCH_VM_ALLOC |
Marc Orr | d1e5b0e | 2018-05-15 04:37:37 -0700 | [diff] [blame] | 907 | /* |
| 908 | * All architectures that want to use vzalloc currently also |
| 909 | * need their own kvm_arch_alloc_vm implementation. |
| 910 | */ |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 911 | static inline struct kvm *kvm_arch_alloc_vm(void) |
| 912 | { |
| 913 | return kzalloc(sizeof(struct kvm), GFP_KERNEL); |
| 914 | } |
| 915 | |
| 916 | static inline void kvm_arch_free_vm(struct kvm *kvm) |
| 917 | { |
| 918 | kfree(kvm); |
| 919 | } |
| 920 | #endif |
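
/*
 * Illustrative sketch, not part of the original header: an architecture
 * whose struct kvm is too big for kzalloc() would define
 * __KVM_HAVE_ARCH_VM_ALLOC and provide something along these lines.
 */
#if 0	/* example only */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return vzalloc(sizeof(struct kvm));	/* virtually contiguous, zeroed */
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	vfree(kvm);	/* must pair with the vzalloc() above */
}
#endif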
| 921 | |
Tianyu Lan | b08660e | 2018-07-19 08:40:17 +0000 | [diff] [blame] | 922 | #ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB |
| 923 | static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) |
| 924 | { |
| 925 | return -ENOTSUPP; |
| 926 | } |
| 927 | #endif |
| 928 | |
Alex Williamson | e0f0bbc | 2013-10-30 11:02:30 -0600 | [diff] [blame] | 929 | #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA |
| 930 | void kvm_arch_register_noncoherent_dma(struct kvm *kvm); |
| 931 | void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm); |
| 932 | bool kvm_arch_has_noncoherent_dma(struct kvm *kvm); |
| 933 | #else |
| 934 | static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm) |
| 935 | { |
| 936 | } |
| 937 | |
| 938 | static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) |
| 939 | { |
| 940 | } |
| 941 | |
| 942 | static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) |
| 943 | { |
| 944 | return false; |
| 945 | } |
| 946 | #endif |
Paolo Bonzini | 5544eb9 | 2015-07-07 15:41:58 +0200 | [diff] [blame] | 947 | #ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE |
| 948 | void kvm_arch_start_assignment(struct kvm *kvm); |
| 949 | void kvm_arch_end_assignment(struct kvm *kvm); |
| 950 | bool kvm_arch_has_assigned_device(struct kvm *kvm); |
| 951 | #else |
| 952 | static inline void kvm_arch_start_assignment(struct kvm *kvm) |
| 953 | { |
| 954 | } |
| 955 | |
| 956 | static inline void kvm_arch_end_assignment(struct kvm *kvm) |
| 957 | { |
| 958 | } |
| 959 | |
| 960 | static inline bool kvm_arch_has_assigned_device(struct kvm *kvm) |
| 961 | { |
| 962 | return false; |
| 963 | } |
| 964 | #endif |
Alex Williamson | e0f0bbc | 2013-10-30 11:02:30 -0600 | [diff] [blame] | 965 | |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 966 | static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu) |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 967 | { |
Alexander Graf | 2246f8b | 2012-03-13 22:35:01 +0100 | [diff] [blame] | 968 | #ifdef __KVM_HAVE_ARCH_WQP |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 969 | return vcpu->arch.waitp; |
Alexander Graf | 2246f8b | 2012-03-13 22:35:01 +0100 | [diff] [blame] | 970 | #else |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 971 | return &vcpu->wait; |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 972 | #endif |
Alexander Graf | 2246f8b | 2012-03-13 22:35:01 +0100 | [diff] [blame] | 973 | } |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 974 | |
Eric Auger | 01c94e6 | 2015-03-04 11:14:33 +0100 | [diff] [blame] | 975 | #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED |
| 976 | /* |
| 977 | * returns true if the virtual interrupt controller is initialized and |
| 978 |  * ready to accept virtual IRQs. On some architectures the virtual interrupt |
| 979 | * controller is dynamically instantiated and this is not always true. |
| 980 | */ |
| 981 | bool kvm_arch_intc_initialized(struct kvm *kvm); |
| 982 | #else |
| 983 | static inline bool kvm_arch_intc_initialized(struct kvm *kvm) |
| 984 | { |
| 985 | return true; |
| 986 | } |
| 987 | #endif |
| 988 | |
Carsten Otte | e08b963 | 2012-01-04 10:25:20 +0100 | [diff] [blame] | 989 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); |
Zhang Xiantao | d19a9cd | 2007-11-18 18:43:45 +0800 | [diff] [blame] | 990 | void kvm_arch_destroy_vm(struct kvm *kvm); |
Sheng Yang | ad8ba2c | 2009-01-06 10:03:02 +0800 | [diff] [blame] | 991 | void kvm_arch_sync_events(struct kvm *kvm); |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 992 | |
Marcelo Tosatti | 3d80840 | 2008-04-11 14:53:26 -0300 | [diff] [blame] | 993 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); |
Zhang Xiantao | 682c59a | 2007-12-11 20:36:00 +0800 | [diff] [blame] | 994 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 995 | bool kvm_is_reserved_pfn(kvm_pfn_t pfn); |
Sean Christopherson | a78986a | 2019-11-11 14:12:27 -0800 | [diff] [blame] | 996 | bool kvm_is_zone_device_pfn(kvm_pfn_t pfn); |
Sean Christopherson | 005ba37 | 2020-01-08 12:24:36 -0800 | [diff] [blame] | 997 | bool kvm_is_transparent_hugepage(kvm_pfn_t pfn); |
Xiantao Zhang | c77fb9d | 2008-09-27 10:55:40 +0800 | [diff] [blame] | 998 | |
Ben-Ami Yassour | 62c476c | 2008-09-14 03:48:28 +0300 | [diff] [blame] | 999 | struct kvm_irq_ack_notifier { |
| 1000 | struct hlist_node link; |
| 1001 | unsigned gsi; |
| 1002 | void (*irq_acked)(struct kvm_irq_ack_notifier *kian); |
| 1003 | }; |
| 1004 | |
Paul Mackerras | 9957c86 | 2014-06-30 20:51:11 +1000 | [diff] [blame] | 1005 | int kvm_irq_map_gsi(struct kvm *kvm, |
| 1006 | struct kvm_kernel_irq_routing_entry *entries, int gsi); |
| 1007 | int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); |
Paul Mackerras | 8ba918d | 2014-06-30 20:51:10 +1000 | [diff] [blame] | 1008 | |
Yang Zhang | aa2fbe6 | 2013-04-11 19:21:40 +0800 | [diff] [blame] | 1009 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, |
| 1010 | bool line_status); |
Michael S. Tsirkin | bd2b53b | 2010-11-18 19:09:08 +0200 | [diff] [blame] | 1011 | int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, |
Yang Zhang | aa2fbe6 | 2013-04-11 19:21:40 +0800 | [diff] [blame] | 1012 | int irq_source_id, int level, bool line_status); |
Paolo Bonzini | b97e6de | 2015-10-28 19:16:47 +0100 | [diff] [blame] | 1013 | int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, |
| 1014 | struct kvm *kvm, int irq_source_id, |
| 1015 | int level, bool line_status); |
Yang Zhang | c7c9c56 | 2013-01-25 10:18:51 +0800 | [diff] [blame] | 1016 | bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); |
Andrey Smetanin | ba1aefc | 2015-10-16 10:07:46 +0300 | [diff] [blame] | 1017 | void kvm_notify_acked_gsi(struct kvm *kvm, int gsi); |
Marcelo Tosatti | 44882ee | 2009-01-27 15:12:38 -0200 | [diff] [blame] | 1018 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); |
Xiantao Zhang | 3de42dc | 2008-10-06 13:48:45 +0800 | [diff] [blame] | 1019 | void kvm_register_irq_ack_notifier(struct kvm *kvm, |
| 1020 | struct kvm_irq_ack_notifier *kian); |
Marcelo Tosatti | fa40a82 | 2009-06-04 15:08:24 -0300 | [diff] [blame] | 1021 | void kvm_unregister_irq_ack_notifier(struct kvm *kvm, |
| 1022 | struct kvm_irq_ack_notifier *kian); |
Sheng Yang | 5550af4 | 2008-10-15 20:15:06 +0800 | [diff] [blame] | 1023 | int kvm_request_irq_source_id(struct kvm *kvm); |
| 1024 | void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); |
Yi Wang | cdc238e | 2019-07-10 08:24:03 +0800 | [diff] [blame] | 1025 | bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); |
Ben-Ami Yassour | 62c476c | 2008-09-14 03:48:28 +0300 | [diff] [blame] | 1026 | |
Paul Mackerras | 9d4cba7 | 2012-01-12 20:09:51 +0000 | [diff] [blame] | 1027 | /* |
| 1028 | * search_memslots() and __gfn_to_memslot() are here because they are |
| 1029 | * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c. |
| 1030 | * gfn_to_memslot() itself isn't here as an inline because that would |
| 1031 | * bloat other code too much. |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1032 | * |
| 1033 | * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! |
Paul Mackerras | 9d4cba7 | 2012-01-12 20:09:51 +0000 | [diff] [blame] | 1034 | */ |
| 1035 | static inline struct kvm_memory_slot * |
| 1036 | search_memslots(struct kvm_memslots *slots, gfn_t gfn) |
| 1037 | { |
Igor Mammedov | 9c1a5d38 | 2014-12-01 17:29:27 +0000 | [diff] [blame] | 1038 | int start = 0, end = slots->used_slots; |
Igor Mammedov | d4ae84a0 | 2014-12-01 17:29:25 +0000 | [diff] [blame] | 1039 | int slot = atomic_read(&slots->lru_slot); |
Igor Mammedov | 9c1a5d38 | 2014-12-01 17:29:27 +0000 | [diff] [blame] | 1040 | struct kvm_memory_slot *memslots = slots->memslots; |
Igor Mammedov | d4ae84a0 | 2014-12-01 17:29:25 +0000 | [diff] [blame] | 1041 | |
Sean Christopherson | 0774a96 | 2020-03-20 13:55:40 -0700 | [diff] [blame] | 1042 | if (unlikely(!slots->used_slots)) |
| 1043 | return NULL; |
| 1044 | |
Igor Mammedov | 9c1a5d38 | 2014-12-01 17:29:27 +0000 | [diff] [blame] | 1045 | if (gfn >= memslots[slot].base_gfn && |
| 1046 | gfn < memslots[slot].base_gfn + memslots[slot].npages) |
| 1047 | return &memslots[slot]; |
Paul Mackerras | 9d4cba7 | 2012-01-12 20:09:51 +0000 | [diff] [blame] | 1048 | |
Igor Mammedov | 9c1a5d38 | 2014-12-01 17:29:27 +0000 | [diff] [blame] | 1049 | while (start < end) { |
| 1050 | slot = start + (end - start) / 2; |
| 1051 | |
| 1052 | if (gfn >= memslots[slot].base_gfn) |
| 1053 | end = slot; |
| 1054 | else |
| 1055 | start = slot + 1; |
| 1056 | } |
| 1057 | |
Sean Christopherson | b6467ab | 2020-04-07 23:40:58 -0700 | [diff] [blame] | 1058 | if (start < slots->used_slots && gfn >= memslots[start].base_gfn && |
Igor Mammedov | 9c1a5d38 | 2014-12-01 17:29:27 +0000 | [diff] [blame] | 1059 | gfn < memslots[start].base_gfn + memslots[start].npages) { |
| 1060 | atomic_set(&slots->lru_slot, start); |
| 1061 | return &memslots[start]; |
| 1062 | } |
Paul Mackerras | 9d4cba7 | 2012-01-12 20:09:51 +0000 | [diff] [blame] | 1063 | |
| 1064 | return NULL; |
| 1065 | } |
| 1066 | |
| 1067 | static inline struct kvm_memory_slot * |
| 1068 | __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) |
| 1069 | { |
| 1070 | return search_memslots(slots, gfn); |
| 1071 | } |
| 1072 | |
Gavin Shan | 66a0350 | 2012-08-24 16:50:28 +0800 | [diff] [blame] | 1073 | static inline unsigned long |
| 1074 | __gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) |
| 1075 | { |
| 1076 | return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; |
| 1077 | } |
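
/*
 * Illustrative sketch, not part of the original header: combining the two
 * inline helpers above to go from gfn to host virtual address. The slots
 * pointer would normally come from kvm_memslots() under SRCU; the helper
 * name is hypothetical.
 */
static inline unsigned long example_gfn_to_hva(struct kvm_memslots *slots,
					       gfn_t gfn)
{
	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);

	if (!slot)
		return KVM_HVA_ERR_BAD;	/* no memslot backs this gfn */

	return __gfn_to_hva_memslot(slot, gfn);
}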
| 1078 | |
Xiao Guangrong | 0ee8dcb | 2011-03-09 15:41:59 +0800 | [diff] [blame] | 1079 | static inline int memslot_id(struct kvm *kvm, gfn_t gfn) |
| 1080 | { |
| 1081 | return gfn_to_memslot(kvm, gfn)->id; |
| 1082 | } |
| 1083 | |
Takuya Yoshikawa | d19a748 | 2012-07-02 17:54:30 +0900 | [diff] [blame] | 1084 | static inline gfn_t |
| 1085 | hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot) |
Xiao Guangrong | 887c08a | 2010-08-22 19:10:28 +0800 | [diff] [blame] | 1086 | { |
Takuya Yoshikawa | d19a748 | 2012-07-02 17:54:30 +0900 | [diff] [blame] | 1087 | gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT; |
| 1088 | |
| 1089 | return slot->base_gfn + gfn_offset; |
Xiao Guangrong | 887c08a | 2010-08-22 19:10:28 +0800 | [diff] [blame] | 1090 | } |
| 1091 | |
Avi Kivity | 1755fbc | 2007-11-21 14:44:45 +0200 | [diff] [blame] | 1092 | static inline gpa_t gfn_to_gpa(gfn_t gfn) |
| 1093 | { |
| 1094 | return (gpa_t)gfn << PAGE_SHIFT; |
| 1095 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1096 | |
Joerg Roedel | c30a358 | 2010-09-10 17:30:48 +0200 | [diff] [blame] | 1097 | static inline gfn_t gpa_to_gfn(gpa_t gpa) |
| 1098 | { |
| 1099 | return (gfn_t)(gpa >> PAGE_SHIFT); |
| 1100 | } |
| 1101 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 1102 | static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn) |
Ben-Ami Yassour | 62c476c | 2008-09-14 03:48:28 +0300 | [diff] [blame] | 1103 | { |
| 1104 | return (hpa_t)pfn << PAGE_SHIFT; |
| 1105 | } |
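
/*
 * Illustrative sketch, not part of the original header: because the
 * conversions above are plain shifts by PAGE_SHIFT, a gfn round trip
 * yields the page-aligned base of a gpa.
 */
static inline gpa_t example_gpa_page_base(gpa_t gpa)
{
	return gfn_to_gpa(gpa_to_gfn(gpa));	/* clears the low PAGE_SHIFT bits */
}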
| 1106 | |
David Hildenbrand | 5e2f30b | 2017-08-03 18:11:04 +0200 | [diff] [blame] | 1107 | static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu, |
| 1108 | gpa_t gpa) |
| 1109 | { |
| 1110 | return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa)); |
| 1111 | } |
| 1112 | |
Heiko Carstens | dfeec84 | 2014-01-01 16:09:21 +0100 | [diff] [blame] | 1113 | static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) |
| 1114 | { |
| 1115 | unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); |
| 1116 | |
| 1117 | return kvm_is_error_hva(hva); |
| 1118 | } |
| 1119 | |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 1120 | enum kvm_stat_kind { |
| 1121 | KVM_STAT_VM, |
| 1122 | KVM_STAT_VCPU, |
| 1123 | }; |
| 1124 | |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 1125 | struct kvm_stat_data { |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 1126 | struct kvm *kvm; |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 1127 | struct kvm_stats_debugfs_item *dbgfs_item; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 1128 | }; |
| 1129 | |
Hollis Blanchard | 417bc30 | 2007-10-31 17:24:23 -0500 | [diff] [blame] | 1130 | struct kvm_stats_debugfs_item { |
| 1131 | const char *name; |
| 1132 | int offset; |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 1133 | enum kvm_stat_kind kind; |
Paolo Bonzini | 833b45d | 2019-09-30 18:48:44 +0200 | [diff] [blame] | 1134 | int mode; |
Hollis Blanchard | 417bc30 | 2007-10-31 17:24:23 -0500 | [diff] [blame] | 1135 | }; |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 1136 | |
| 1137 | #define KVM_DBGFS_GET_MODE(dbgfs_item) \ |
| 1138 | ((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644) |
| 1139 | |
Emanuele Giuseppe Esposito | 812756a | 2020-04-14 17:56:25 +0200 | [diff] [blame] | 1140 | #define VM_STAT(n, x, ...) \ |
| 1141 | { n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ } |
| 1142 | #define VCPU_STAT(n, x, ...) \ |
| 1143 | { n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ } |
| 1144 | |
Hollis Blanchard | 417bc30 | 2007-10-31 17:24:23 -0500 | [diff] [blame] | 1145 | extern struct kvm_stats_debugfs_item debugfs_entries[]; |
Hollis Blanchard | 76f7c87 | 2008-04-15 16:05:42 -0500 | [diff] [blame] | 1146 | extern struct dentry *kvm_debugfs_dir; |
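
/*
 * Illustrative sketch, not part of the original header: each architecture
 * defines debugfs_entries[] with the macros above. The stat fields shown
 * ("remote_tlb_flush", "halt_exits") exist on several architectures but are
 * assumptions here; the table is NULL-terminated.
 */
#if 0	/* example only */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	VM_STAT("remote_tlb_flush", remote_tlb_flush),
	VCPU_STAT("halt_exits", halt_exits),
	{ NULL }
};
#endif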
Feng(Eric) Liu | d4c9ff2 | 2008-04-10 08:47:53 -0400 | [diff] [blame] | 1147 | |
Marc Zyngier | 36c1ed8 | 2012-06-15 15:07:24 -0400 | [diff] [blame] | 1148 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
Christoffer Dall | 8ca40a7 | 2012-10-14 23:10:18 -0400 | [diff] [blame] | 1149 | static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1150 | { |
Christoffer Dall | 8ca40a7 | 2012-10-14 23:10:18 -0400 | [diff] [blame] | 1151 | if (unlikely(kvm->mmu_notifier_count)) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1152 | return 1; |
| 1153 | /* |
Paul Mackerras | a355aa5 | 2011-12-12 12:37:21 +0000 | [diff] [blame] | 1154 | * Ensure the read of mmu_notifier_count happens before the read |
| 1155 | * of mmu_notifier_seq. This interacts with the smp_wmb() in |
| 1156 | * mmu_notifier_invalidate_range_end to make sure that the caller |
| 1157 | * either sees the old (non-zero) value of mmu_notifier_count or |
| 1158 | * the new (incremented) value of mmu_notifier_seq. |
| 1159 | * PowerPC Book3s HV KVM calls this under a per-page lock |
| 1160 | * rather than under kvm->mmu_lock, for scalability, so |
| 1161 | * can't rely on kvm->mmu_lock to keep things ordered. |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1162 | */ |
Paul Mackerras | a355aa5 | 2011-12-12 12:37:21 +0000 | [diff] [blame] | 1163 | smp_rmb(); |
Christoffer Dall | 8ca40a7 | 2012-10-14 23:10:18 -0400 | [diff] [blame] | 1164 | if (kvm->mmu_notifier_seq != mmu_seq) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1165 | return 1; |
| 1166 | return 0; |
| 1167 | } |
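
/*
 * Illustrative sketch, not part of the original header: the usual shape of
 * an arch page-fault path around mmu_notifier_retry(). The function is
 * hypothetical; real handlers install a translation where the comment is.
 */
static inline bool example_map_gfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long mmu_seq = kvm->mmu_notifier_seq;
	kvm_pfn_t pfn;
	bool mapped = false;

	smp_rmb();	/* read mmu_notifier_seq before faulting the pfn in */

	/* Potentially sleeping lookup, done outside mmu_lock. */
	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_pfn(pfn))
		return false;

	spin_lock(&kvm->mmu_lock);
	if (!mmu_notifier_retry(kvm, mmu_seq)) {
		/* ... install the pfn into the stage-2/shadow tables ... */
		mapped = true;
	}
	spin_unlock(&kvm->mmu_lock);

	kvm_release_pfn_clean(pfn);
	return mapped;
}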
| 1168 | #endif |
| 1169 | |
Alexander Graf | a725d56 | 2013-04-17 13:29:30 +0200 | [diff] [blame] | 1170 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
Avi Kivity | 399ec80 | 2008-11-19 13:58:46 +0200 | [diff] [blame] | 1171 | |
Wanpeng Li | ddc9cfb | 2018-04-26 17:55:03 -0700 | [diff] [blame] | 1172 | #define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */ |
Avi Kivity | 399ec80 | 2008-11-19 13:58:46 +0200 | [diff] [blame] | 1173 | |
David Hildenbrand | 5c0aea0 | 2017-04-28 17:06:20 +0200 | [diff] [blame] | 1174 | bool kvm_arch_can_set_irq_routing(struct kvm *kvm); |
Avi Kivity | 399ec80 | 2008-11-19 13:58:46 +0200 | [diff] [blame] | 1175 | int kvm_set_irq_routing(struct kvm *kvm, |
| 1176 | const struct kvm_irq_routing_entry *entries, |
| 1177 | unsigned nr, |
| 1178 | unsigned flags); |
Radim Krčmář | c63cf53 | 2016-07-12 22:09:26 +0200 | [diff] [blame] | 1179 | int kvm_set_routing_entry(struct kvm *kvm, |
| 1180 | struct kvm_kernel_irq_routing_entry *e, |
Alexander Graf | e8cde09 | 2013-04-15 23:23:21 +0200 | [diff] [blame] | 1181 | const struct kvm_irq_routing_entry *ue); |
Avi Kivity | 399ec80 | 2008-11-19 13:58:46 +0200 | [diff] [blame] | 1182 | void kvm_free_irq_routing(struct kvm *kvm); |
| 1183 | |
| 1184 | #else |
| 1185 | |
| 1186 | static inline void kvm_free_irq_routing(struct kvm *kvm) {} |
| 1187 | |
| 1188 | #endif |
| 1189 | |
Paul Mackerras | 297e210 | 2014-06-30 20:51:13 +1000 | [diff] [blame] | 1190 | int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); |
| 1191 | |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 1192 | #ifdef CONFIG_HAVE_KVM_EVENTFD |
| 1193 | |
Gregory Haskins | d34e6b1 | 2009-07-07 17:08:49 -0400 | [diff] [blame] | 1194 | void kvm_eventfd_init(struct kvm *kvm); |
Alexander Graf | 914daba | 2012-10-09 00:22:59 +0200 | [diff] [blame] | 1195 | int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args); |
| 1196 | |
Paul Mackerras | 297e210 | 2014-06-30 20:51:13 +1000 | [diff] [blame] | 1197 | #ifdef CONFIG_HAVE_KVM_IRQFD |
Alex Williamson | d4db293 | 2012-06-29 09:56:08 -0600 | [diff] [blame] | 1198 | int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args); |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 1199 | void kvm_irqfd_release(struct kvm *kvm); |
Paul Mackerras | 9957c86 | 2014-06-30 20:51:11 +1000 | [diff] [blame] | 1200 | void kvm_irq_routing_update(struct kvm *); |
Alexander Graf | 914daba | 2012-10-09 00:22:59 +0200 | [diff] [blame] | 1201 | #else |
| 1202 | static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) |
| 1203 | { |
| 1204 | return -EINVAL; |
| 1205 | } |
| 1206 | |
| 1207 | static inline void kvm_irqfd_release(struct kvm *kvm) {} |
| 1208 | #endif |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 1209 | |
| 1210 | #else |
| 1211 | |
Gregory Haskins | d34e6b1 | 2009-07-07 17:08:49 -0400 | [diff] [blame] | 1212 | static inline void kvm_eventfd_init(struct kvm *kvm) {} |
Michael S. Tsirkin | bd2b53b | 2010-11-18 19:09:08 +0200 | [diff] [blame] | 1213 | |
Alex Williamson | d4db293 | 2012-06-29 09:56:08 -0600 | [diff] [blame] | 1214 | static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 1215 | { |
| 1216 | return -EINVAL; |
| 1217 | } |
| 1218 | |
| 1219 | static inline void kvm_irqfd_release(struct kvm *kvm) {} |
Michael S. Tsirkin | bd2b53b | 2010-11-18 19:09:08 +0200 | [diff] [blame] | 1220 | |
Alexander Graf | 27923eb | 2010-11-25 10:25:44 +0100 | [diff] [blame] | 1221 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
Paul Mackerras | 9957c86 | 2014-06-30 20:51:11 +1000 | [diff] [blame] | 1222 | static inline void kvm_irq_routing_update(struct kvm *kvm) |
Michael S. Tsirkin | bd2b53b | 2010-11-18 19:09:08 +0200 | [diff] [blame] | 1223 | { |
Michael S. Tsirkin | bd2b53b | 2010-11-18 19:09:08 +0200 | [diff] [blame] | 1224 | } |
Alexander Graf | 27923eb | 2010-11-25 10:25:44 +0100 | [diff] [blame] | 1225 | #endif |
Michael S. Tsirkin | bd2b53b | 2010-11-18 19:09:08 +0200 | [diff] [blame] | 1226 | |
Gregory Haskins | d34e6b1 | 2009-07-07 17:08:49 -0400 | [diff] [blame] | 1227 | static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) |
| 1228 | { |
| 1229 | return -ENOSYS; |
| 1230 | } |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 1231 | |
| 1232 | #endif /* CONFIG_HAVE_KVM_EVENTFD */ |
| 1233 | |
Sebastian Ott | 0764674 | 2018-02-22 13:05:41 +0100 | [diff] [blame] | 1234 | void kvm_arch_irq_routing_update(struct kvm *kvm); |
| 1235 | |
Avi Kivity | a8eeb04 | 2010-05-10 12:34:53 +0300 | [diff] [blame] | 1236 | static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) |
| 1237 | { |
Paolo Bonzini | 2e4682b | 2016-03-10 16:30:22 +0100 | [diff] [blame] | 1238 | /* |
| 1239 | * Ensure the rest of the request is published to kvm_check_request's |
| 1240 | * caller. Paired with the smp_mb__after_atomic in kvm_check_request. |
| 1241 | */ |
| 1242 | smp_wmb(); |
KarimAllah Ahmed | 86dafed | 2018-07-10 11:27:19 +0200 | [diff] [blame] | 1243 | set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); |
Avi Kivity | a8eeb04 | 2010-05-10 12:34:53 +0300 | [diff] [blame] | 1244 | } |
| 1245 | |
Radim Krčmář | 2fa6e1e | 2017-06-04 14:43:52 +0200 | [diff] [blame] | 1246 | static inline bool kvm_request_pending(struct kvm_vcpu *vcpu) |
| 1247 | { |
| 1248 | return READ_ONCE(vcpu->requests); |
| 1249 | } |
| 1250 | |
Radim Krčmář | 72875d8 | 2017-04-26 22:32:19 +0200 | [diff] [blame] | 1251 | static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu) |
| 1252 | { |
KarimAllah Ahmed | 86dafed | 2018-07-10 11:27:19 +0200 | [diff] [blame] | 1253 | return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); |
Radim Krčmář | 72875d8 | 2017-04-26 22:32:19 +0200 | [diff] [blame] | 1254 | } |
| 1255 | |
| 1256 | static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu) |
| 1257 | { |
KarimAllah Ahmed | 86dafed | 2018-07-10 11:27:19 +0200 | [diff] [blame] | 1258 | clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); |
Radim Krčmář | 72875d8 | 2017-04-26 22:32:19 +0200 | [diff] [blame] | 1259 | } |
| 1260 | |
Avi Kivity | a8eeb04 | 2010-05-10 12:34:53 +0300 | [diff] [blame] | 1261 | static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) |
| 1262 | { |
Radim Krčmář | 72875d8 | 2017-04-26 22:32:19 +0200 | [diff] [blame] | 1263 | if (kvm_test_request(req, vcpu)) { |
| 1264 | kvm_clear_request(req, vcpu); |
Paolo Bonzini | 2e4682b | 2016-03-10 16:30:22 +0100 | [diff] [blame] | 1265 | |
| 1266 | /* |
| 1267 | * Ensure the rest of the request is visible to kvm_check_request's |
| 1268 | * caller. Paired with the smp_wmb in kvm_make_request. |
| 1269 | */ |
| 1270 | smp_mb__after_atomic(); |
Avi Kivity | 0719837 | 2010-05-10 13:08:26 +0300 | [diff] [blame] | 1271 | return true; |
| 1272 | } else { |
| 1273 | return false; |
| 1274 | } |
Avi Kivity | a8eeb04 | 2010-05-10 12:34:53 +0300 | [diff] [blame] | 1275 | } |
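
/*
 * Illustrative sketch, not part of the original header: the producer and
 * consumer sides of the request API above. Both helper names are
 * hypothetical; @req is any KVM_REQ_* bit.
 */
static inline void example_post_and_kick(struct kvm_vcpu *vcpu, int req)
{
	kvm_make_request(req, vcpu);	/* smp_wmb() publishes prior writes */
	kvm_vcpu_kick(vcpu);		/* force the vCPU out of guest mode */
}

static inline bool example_service_request(struct kvm_vcpu *vcpu, int req)
{
	if (!kvm_request_pending(vcpu))
		return false;
	/* A true result orders subsequent reads after the producer's writes. */
	return kvm_check_request(req, vcpu);
}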
| 1276 | |
Geoff Levand | 8b415dc | 2013-04-05 19:20:30 +0000 | [diff] [blame] | 1277 | extern bool kvm_rebooting; |
| 1278 | |
Suraj Jitindar Singh | ec76d81 | 2016-10-14 11:53:19 +1100 | [diff] [blame] | 1279 | extern unsigned int halt_poll_ns; |
| 1280 | extern unsigned int halt_poll_ns_grow; |
Nir Weiner | 49113d3 | 2019-01-27 12:17:15 +0200 | [diff] [blame] | 1281 | extern unsigned int halt_poll_ns_grow_start; |
Suraj Jitindar Singh | ec76d81 | 2016-10-14 11:53:19 +1100 | [diff] [blame] | 1282 | extern unsigned int halt_poll_ns_shrink; |
| 1283 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 1284 | struct kvm_device { |
Steven Price | 8538cb2 | 2019-10-21 16:28:19 +0100 | [diff] [blame] | 1285 | const struct kvm_device_ops *ops; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 1286 | struct kvm *kvm; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 1287 | void *private; |
Scott Wood | 07f0a7b | 2013-04-25 14:11:23 +0000 | [diff] [blame] | 1288 | struct list_head vm_node; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 1289 | }; |
| 1290 | |
| 1291 | /* create, destroy, and name are mandatory */ |
| 1292 | struct kvm_device_ops { |
| 1293 | const char *name; |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 1294 | |
| 1295 | /* |
| 1296 | * create is called holding kvm->lock and any operations not suitable |
| 1297 | * to do while holding the lock should be deferred to init (see |
| 1298 | * below). |
| 1299 | */ |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 1300 | int (*create)(struct kvm_device *dev, u32 type); |
| 1301 | |
| 1302 | /* |
Christoffer Dall | 023e9fd | 2016-08-09 19:13:00 +0200 | [diff] [blame] | 1303 | * init is called after create if create is successful and is called |
| 1304 | * outside of holding kvm->lock. |
| 1305 | */ |
| 1306 | void (*init)(struct kvm_device *dev); |
| 1307 | |
| 1308 | /* |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 1309 | * Destroy is responsible for freeing dev. |
| 1310 | * |
| 1311 | * Destroy may be called before or after destructors are called |
| 1312 | * on emulated I/O regions, depending on whether a reference is |
| 1313 | * held by a vcpu or other kvm component that gets destroyed |
| 1314 | * after the emulated I/O. |
| 1315 | */ |
| 1316 | void (*destroy)(struct kvm_device *dev); |
| 1317 | |
Cédric Le Goater | 2bde9b3 | 2019-04-18 12:39:41 +0200 | [diff] [blame] | 1318 | /* |
| 1319 | * Release is an alternative method to free the device. It is |
| 1320 | * called when the device file descriptor is closed. Once |
| 1321 | * release is called, the destroy method will not be called |
| 1322 | * anymore as the device is removed from the device list of |
| 1323 | * the VM. kvm->lock is held. |
| 1324 | */ |
| 1325 | void (*release)(struct kvm_device *dev); |
| 1326 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 1327 | int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); |
| 1328 | int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); |
| 1329 | int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); |
| 1330 | long (*ioctl)(struct kvm_device *dev, unsigned int ioctl, |
| 1331 | unsigned long arg); |
Cédric Le Goater | a1cd3f0 | 2019-04-18 12:39:36 +0200 | [diff] [blame] | 1332 | int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 1333 | }; |
| 1334 | |
| 1335 | void kvm_device_get(struct kvm_device *dev); |
| 1336 | void kvm_device_put(struct kvm_device *dev); |
| 1337 | struct kvm_device *kvm_device_from_filp(struct file *filp); |
Steven Price | 8538cb2 | 2019-10-21 16:28:19 +0100 | [diff] [blame] | 1338 | int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type); |
Wanpeng Li | 571ee1b | 2014-10-09 18:30:08 +0800 | [diff] [blame] | 1339 | void kvm_unregister_device_ops(u32 type); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 1340 | |
Scott Wood | 5df554ad | 2013-04-12 14:08:46 +0000 | [diff] [blame] | 1341 | extern struct kvm_device_ops kvm_mpic_ops; |
Andre Przywara | ea2f83a | 2014-10-26 23:17:00 +0000 | [diff] [blame] | 1342 | extern struct kvm_device_ops kvm_arm_vgic_v2_ops; |
Andre Przywara | a0675c2 | 2014-06-07 00:54:51 +0200 | [diff] [blame] | 1343 | extern struct kvm_device_ops kvm_arm_vgic_v3_ops; |
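
/*
 * Illustrative sketch, not part of the original header: a minimal backend
 * for the ops table above, with hypothetical "example" names. Only create,
 * destroy and name are mandatory; registration happens at init time via
 * kvm_register_device_ops() with an arch-chosen device type.
 */
static int example_dev_create(struct kvm_device *dev, u32 type)
{
	/* Called with kvm->lock (a mutex) held, so GFP_KERNEL is fine. */
	dev->private = kzalloc(sizeof(unsigned long), GFP_KERNEL);
	return dev->private ? 0 : -ENOMEM;
}

static void example_dev_destroy(struct kvm_device *dev)
{
	kfree(dev->private);
	kfree(dev);	/* destroy must free dev itself, per the comment above */
}

static const struct kvm_device_ops example_dev_ops = {
	.name = "example",
	.create = example_dev_create,
	.destroy = example_dev_destroy,
};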
Scott Wood | 5df554ad | 2013-04-12 14:08:46 +0000 | [diff] [blame] | 1344 | |
Raghavendra K T | 4c08849 | 2012-07-18 19:07:46 +0530 | [diff] [blame] | 1345 | #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT |
| 1346 | |
| 1347 | static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) |
| 1348 | { |
| 1349 | vcpu->spin_loop.in_spin_loop = val; |
| 1350 | } |
| 1351 | static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) |
| 1352 | { |
| 1353 | vcpu->spin_loop.dy_eligible = val; |
| 1354 | } |
| 1355 | |
| 1356 | #else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ |
| 1357 | |
| 1358 | static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) |
| 1359 | { |
| 1360 | } |
| 1361 | |
| 1362 | static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) |
| 1363 | { |
| 1364 | } |
Raghavendra K T | 4c08849 | 2012-07-18 19:07:46 +0530 | [diff] [blame] | 1365 | #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ |
Eric Auger | 1a02b27 | 2015-09-18 22:29:43 +0800 | [diff] [blame] | 1366 | |
Paolo Bonzini | c36b715 | 2020-04-16 09:48:07 -0400 | [diff] [blame] | 1367 | static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot) |
| 1368 | { |
| 1369 | return (memslot && memslot->id < KVM_USER_MEM_SLOTS && |
| 1370 | !(memslot->flags & KVM_MEMSLOT_INVALID)); |
| 1371 | } |
| 1372 | |
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 1373 | struct kvm_vcpu *kvm_get_running_vcpu(void); |
Christian Borntraeger | fcd07f9 | 2020-02-28 09:49:41 +0100 | [diff] [blame] | 1374 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); |
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 1375 | |
Eric Auger | 1a02b27 | 2015-09-18 22:29:43 +0800 | [diff] [blame] | 1376 | #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS |
Alex Williamson | 14717e2 | 2016-05-05 11:58:35 -0600 | [diff] [blame] | 1377 | bool kvm_arch_has_irq_bypass(void); |
Eric Auger | 1a02b27 | 2015-09-18 22:29:43 +0800 | [diff] [blame] | 1378 | int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, |
| 1379 | struct irq_bypass_producer *); |
| 1380 | void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, |
| 1381 | struct irq_bypass_producer *); |
| 1382 | void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); |
| 1383 | void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); |
Feng Wu | f70c20a | 2015-09-18 22:29:53 +0800 | [diff] [blame] | 1384 | int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, |
| 1385 | uint32_t guest_irq, bool set); |
Eric Auger | 1a02b27 | 2015-09-18 22:29:43 +0800 | [diff] [blame] | 1386 | #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ |
Haozhong Zhang | 35181e8 | 2015-10-20 15:39:03 +0800 | [diff] [blame] | 1387 | |
Christian Borntraeger | 3491caf | 2016-05-13 12:16:35 +0200 | [diff] [blame] | 1388 | #ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS |
| 1389 | /* If we wakeup during the poll time, was it a sucessful poll? */ |
| 1390 | static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) |
| 1391 | { |
| 1392 | return vcpu->valid_wakeup; |
| 1393 | } |
| 1394 | |
| 1395 | #else |
| 1396 | static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) |
| 1397 | { |
| 1398 | return true; |
| 1399 | } |
| 1400 | #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */ |
| 1401 | |
Christian Borntraeger | cdd6ad3 | 2019-03-05 05:30:01 -0500 | [diff] [blame] | 1402 | #ifdef CONFIG_HAVE_KVM_NO_POLL |
| 1403 | /* Callback that tells if we must not poll */ |
| 1404 | bool kvm_arch_no_poll(struct kvm_vcpu *vcpu); |
| 1405 | #else |
| 1406 | static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) |
| 1407 | { |
| 1408 | return false; |
| 1409 | } |
| 1410 | #endif /* CONFIG_HAVE_KVM_NO_POLL */ |
| 1411 | |
Paolo Bonzini | 5cb0944 | 2017-12-12 17:41:34 +0100 | [diff] [blame] | 1412 | #ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL |
| 1413 | long kvm_arch_vcpu_async_ioctl(struct file *filp, |
| 1414 | unsigned int ioctl, unsigned long arg); |
| 1415 | #else |
| 1416 | static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, |
| 1417 | unsigned int ioctl, |
| 1418 | unsigned long arg) |
| 1419 | { |
| 1420 | return -ENOIOCTLCMD; |
| 1421 | } |
| 1422 | #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ |
| 1423 | |
Eiichi Tsukata | e649b3f | 2020-06-06 13:26:27 +0900 | [diff] [blame] | 1424 | void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, |
| 1425 | unsigned long start, unsigned long end); |
Sebastian Ott | f75e492 | 2018-02-22 13:04:39 +0100 | [diff] [blame] | 1426 | |
Christoffer Dall | bd2a639 | 2018-02-23 17:23:57 +0100 | [diff] [blame] | 1427 | #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE |
| 1428 | int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); |
| 1429 | #else |
| 1430 | static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) |
| 1431 | { |
| 1432 | return 0; |
| 1433 | } |
| 1434 | #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */ |
| 1435 | |
Junaid Shahid | c57c804 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 1436 | typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data); |
| 1437 | |
| 1438 | int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, |
| 1439 | uintptr_t data, const char *name, |
| 1440 | struct task_struct **thread_ptr); |
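
/*
 * Illustrative sketch, not part of the original header: spawning a per-VM
 * worker with the API above. Both function names and the "kvm-example"
 * thread name are hypothetical.
 */
static int example_vm_worker(struct kvm *kvm, uintptr_t data)
{
	/* Invoked in a dedicated kernel thread created for this VM. */
	return 0;
}

static inline int example_start_vm_worker(struct kvm *kvm,
					  struct task_struct **thread)
{
	return kvm_vm_create_worker_thread(kvm, example_vm_worker, 0,
					   "kvm-example", thread);
}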
| 1441 | |
Thomas Gleixner | 935ace2 | 2020-07-22 23:59:59 +0200 | [diff] [blame^] | 1442 | #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK |
| 1443 | static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu) |
| 1444 | { |
| 1445 | vcpu->run->exit_reason = KVM_EXIT_INTR; |
| 1446 | vcpu->stat.signal_exits++; |
| 1447 | } |
| 1448 | #endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */ |
| 1449 | |
Avi Kivity | bfd99ff | 2009-08-26 14:57:50 +0300 | [diff] [blame] | 1450 | #endif |