// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>

/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12
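/* 12 = sign + ten decimal digits + trailing '\0' for a 32-bit value. */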

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from. */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);

/* Divisor for shrinking per-vcpu halt_poll_ns; the default of 0 resets it. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
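
/*
 * Taken together these knobs implement adaptive halt polling:
 * halt_poll_ns provides the default per-VM cap on a vcpu's polling
 * window, halt_poll_ns_grow_start seeds it, halt_poll_ns_grow
 * multiplies it and halt_poll_ns_shrink divides it (0 meaning "reset
 * the window to 0").  The policy deciding when to grow or shrink
 * lives in kvm_vcpu_block().
 */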

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_MUTEX(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static int kvm_debugfs_num_entries;
static const struct file_operations stat_fops_per_vm;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm.
 * - If the open was done by a 64bit task and the KVM fd is then
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

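/*
 * Default no-op; architectures that need to react to a ranged
 * invalidation override this.
 */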
__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
						   unsigned long start, unsigned long end)
{
}

bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
{
	/*
	 * The metadata used by is_zone_device_page() to determine whether or
	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
	 * page_count() is zero to help detect bad usage of this helper.
	 */
	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
		return false;

	return is_zone_device_page(pfn_to_page(pfn));
}

bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
	/*
	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
	 * perspective they are "normal" pages, albeit with slightly different
	 * usage rules.
	 */
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn)) &&
		       !is_zero_pfn(pfn) &&
		       !kvm_is_zone_device_pfn(pfn);

	return true;
}

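/*
 * Return true if @pfn is backed by a compound page that is currently
 * mapped as a transparent hugepage.
 */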
bool kvm_is_transparent_hugepage(kvm_pfn_t pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (!PageTransCompoundMap(page))
		return false;

	return is_transparent_hugepage(compound_head(page));
}

/*
 * Switches to the specified vcpu, until a matching vcpu_put().
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	__this_cpu_write(kvm_running_vcpu, vcpu);
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	__this_cpu_write(kvm_running_vcpu, NULL);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);
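
/*
 * Typical calling pattern (sketch):
 *
 *	vcpu_load(vcpu);
 *	... touch state that requires the vcpu to be loaded ...
 *	vcpu_put(vcpu);
 *
 * Most callers inherit this pairing from the vcpu ioctl path.
 */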

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}

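/*
 * ack_flush() is intentionally empty: the smp_call_function IPI it is
 * attached to exists only to kick the target CPU out of guest mode.
 */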
static void ack_flush(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
{
	if (unlikely(!cpus))
		cpus = cpu_online_mask;

	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_flush, NULL, wait);
	return true;
}

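/*
 * Make @req pending on every vcpu in @vcpu_bitmap (or on all vcpus when
 * @vcpu_bitmap is NULL), skipping @except, and IPI the CPUs that need
 * it using the caller-provided @tmp mask.  Returns true if an IPI was
 * sent to at least one CPU (when @tmp is NULL, all online CPUs are
 * kicked).
 */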
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 struct kvm_vcpu *except,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
{
	int i, cpu, me;
	struct kvm_vcpu *vcpu;
	bool called;

	me = get_cpu();

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) ||
		    vcpu == except)
			continue;

		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
			continue;

		if (tmp != NULL && cpu != -1 && cpu != me &&
		    kvm_request_needs_ipi(vcpu, req))
			__cpumask_set_cpu(cpu, tmp);
	}

	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except)
{
	cpumask_var_t cpus;
	bool called;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);

	free_cpumask_var(cpus);
	return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	return kvm_make_all_cpus_request_except(kvm, req, NULL);
}

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	/*
	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
	 * kvm_make_all_cpus_request.
	 */
	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode.  Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode.  We reuse that
	 * barrier here.
	 */
	if (!kvm_arch_flush_remote_tlb(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

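/* Force every vcpu to drop and rebuild its MMU context on next entry. */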
void kvm_reload_remote_mmus(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

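/*
 * MMU memory caches pre-allocate objects in a sleepable context so that
 * fault handlers can later take them with the mmu_lock held.  A rough
 * usage sketch:
 *
 *	kvm_mmu_topup_memory_cache(mc, min);	// may sleep
 *	spin_lock(&kvm->mmu_lock);
 *	obj = kvm_mmu_memory_cache_alloc(mc);	// cannot fail if topped up
 *	spin_unlock(&kvm->mmu_lock);
 */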
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{
	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
	else
		return (void *)__get_free_page(gfp_flags);
}

int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	void *obj;

	if (mc->nobjs >= min)
		return 0;
	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
		obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs;
}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs) {
		if (mc->kmem_cache)
			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
		else
			free_page((unsigned long)mc->objects[--mc->nobjs]);
	}
}

void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	if (WARN_ON(!mc->nobjs))
		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
	else
		p = mc->objects[--mc->nobjs];
	BUG_ON(!p);
	return p;
}
#endif

static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	rcuwait_init(&vcpu->wait);
	kvm_async_pf_vcpu_init(vcpu);

	vcpu->pre_pcpu = -1;
	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;
	vcpu->ready = false;
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
}

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_dirty_ring_free(&vcpu->dirty_ring);
	kvm_arch_vcpu_destroy(vcpu);

	/*
	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
	 * the vcpu->pid pointer, and at destruction time all file descriptors
	 * are already gone.
	 */
	put_pid(rcu_dereference_protected(vcpu->pid, 1));

	free_page((unsigned long)vcpu->run);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);

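/*
 * MMU notifier integration.  Two fields on struct kvm coordinate with
 * the page fault paths: mmu_notifier_count is nonzero while a ranged
 * invalidation is in flight, and mmu_notifier_seq is bumped whenever
 * an invalidation completes.  Fault handlers sample both (see
 * mmu_notifier_retry()) and retry rather than install an spte for a
 * page that may just have been freed.
 */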
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start, unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;

	if (kvm_set_spte_hva(kvm, address, pte))
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
					     range->flags);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* We have to flush the tlb before the pages can be freed. */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return 0;
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase notifies the kvm page fault path that
	 * the page about to be mapped in an spte could have been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	young = kvm_age_hva(kvm, start, end);
	if (young)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead.  If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence.  If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	young = kvm_age_hva(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_range	= kvm_mmu_notifier_invalidate_range,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

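/*
 * Allocate an empty memslots instance; every id_to_index entry is set
 * to -1 to mark the corresponding slot unused.
 */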
static struct kvm_memslots *kvm_alloc_memslots(void)
{
	int i;
	struct kvm_memslots *slots;

	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
	if (!slots)
		return NULL;

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = -1;

	return slots;
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_destroy_dirty_bitmap(slot);

	kvm_arch_free_memslot(kvm, slot);

	slot->flags = 0;
	slot->npages = 0;
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;

	if (!slots)
		return;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot);

	kvfree(slots);
}

static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
	int i;

	if (!kvm->debugfs_dentry)
		return;

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);
	}
}

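/*
 * Create the "<pid>-<fd>" directory under the kvm debugfs root and
 * populate it with one file per stat in debugfs_entries.
 */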
static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
{
	char dir_name[ITOA_MAX_LEN * 2];
	struct kvm_stat_data *stat_data;
	struct kvm_stats_debugfs_item *p;

	if (!debugfs_initialized())
		return 0;

	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
	kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);

	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
					 sizeof(*kvm->debugfs_stat_data),
					 GFP_KERNEL_ACCOUNT);
	if (!kvm->debugfs_stat_data)
		return -ENOMEM;

	for (p = debugfs_entries; p->name; p++) {
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->dbgfs_item = p;
		kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
		debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}
	return 0;
}

/*
 * Called after the VM is otherwise initialized, but just before adding it to
 * the vm_list.
 */
int __weak kvm_arch_post_init_vm(struct kvm *kvm)
{
	return 0;
}

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
}

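/*
 * Allocate and initialize a VM: arch state, memslots and I/O buses,
 * hardware virtualization (enabled lazily for the first VM), the MMU
 * notifier, and finally visibility on vm_list.  Any failure unwinds
 * through the labels below in reverse order.
 */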
static struct kvm *kvm_create_vm(unsigned long type)
{
	struct kvm *kvm = kvm_arch_alloc_vm();
	int r = -ENOMEM;
	int i;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kvm->mmu_lock);
	mmgrab(current->mm);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	INIT_LIST_HEAD(&kvm->devices);

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;

	refcount_set(&kvm->users_count, 1);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct kvm_memslots *slots = kvm_alloc_memslots();

		if (!slots)
			goto out_err_no_arch_destroy_vm;
		/* Generations must be different for each address space. */
		slots->generation = i;
		rcu_assign_pointer(kvm->memslots[i], slots);
	}

	for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
		if (!kvm->buses[i])
			goto out_err_no_arch_destroy_vm;
	}

	kvm->max_halt_poll_ns = halt_poll_ns;

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_arch_destroy_vm;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err_no_mmu_notifier;

	r = kvm_arch_post_init_vm(kvm);
	if (r)
		goto out_err;

	mutex_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	mutex_unlock(&kvm_lock);

	preempt_notifier_inc();

	return kvm;

out_err:
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	if (kvm->mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
	hardware_disable_all();
out_err_no_disable:
	kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm_get_bus(kvm, i));
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

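/*
 * Tear down a VM once the last reference is gone, largely in the
 * reverse order of kvm_create_vm().
 */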
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 852 | static void kvm_destroy_vm(struct kvm *kvm) |
| 853 | { |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 854 | int i; |
Avi Kivity | 6d4e4c4 | 2007-11-21 16:41:05 +0200 | [diff] [blame] | 855 | struct mm_struct *mm = kvm->mm; |
| 856 | |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 857 | kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 858 | kvm_destroy_vm_debugfs(kvm); |
Sheng Yang | ad8ba2c | 2009-01-06 10:03:02 +0800 | [diff] [blame] | 859 | kvm_arch_sync_events(kvm); |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 860 | mutex_lock(&kvm_lock); |
Avi Kivity | 133de90 | 2007-02-12 00:54:44 -0800 | [diff] [blame] | 861 | list_del(&kvm->vm_list); |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 862 | mutex_unlock(&kvm_lock); |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 863 | kvm_arch_pre_destroy_vm(kvm); |
| 864 | |
Avi Kivity | 399ec80 | 2008-11-19 13:58:46 +0200 | [diff] [blame] | 865 | kvm_free_irq_routing(kvm); |
Peter Xu | df630b8 | 2017-03-15 16:01:17 +0800 | [diff] [blame] | 866 | for (i = 0; i < KVM_NR_BUSES; i++) { |
Paolo Bonzini | 3898da9 | 2017-08-02 17:55:54 +0200 | [diff] [blame] | 867 | struct kvm_io_bus *bus = kvm_get_bus(kvm, i); |
Christian Borntraeger | 4a12f95 | 2017-07-07 10:51:38 +0200 | [diff] [blame] | 868 | |
Christian Borntraeger | 4a12f95 | 2017-07-07 10:51:38 +0200 | [diff] [blame] | 869 | if (bus) |
| 870 | kvm_io_bus_destroy(bus); |
Peter Xu | df630b8 | 2017-03-15 16:01:17 +0800 | [diff] [blame] | 871 | kvm->buses[i] = NULL; |
| 872 | } |
Avi Kivity | 980da6c | 2009-12-20 15:13:43 +0200 | [diff] [blame] | 873 | kvm_coalesced_mmio_free(kvm); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 874 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
| 875 | mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); |
Gleb Natapov | f00be0c | 2009-03-19 12:20:36 +0200 | [diff] [blame] | 876 | #else |
Marcelo Tosatti | 2df72e9 | 2012-08-24 15:54:57 -0300 | [diff] [blame] | 877 | kvm_arch_flush_shadow_all(kvm); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 878 | #endif |
Zhang Xiantao | d19a9cd | 2007-11-18 18:43:45 +0800 | [diff] [blame] | 879 | kvm_arch_destroy_vm(kvm); |
Scott Wood | 07f0a7b | 2013-04-25 14:11:23 +0000 | [diff] [blame] | 880 | kvm_destroy_devices(kvm); |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 881 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) |
Paolo Bonzini | 3898da9 | 2017-08-02 17:55:54 +0200 | [diff] [blame] | 882 | kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); |
Paolo Bonzini | 820b3fc | 2014-06-03 13:44:17 +0200 | [diff] [blame] | 883 | cleanup_srcu_struct(&kvm->irq_srcu); |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 884 | cleanup_srcu_struct(&kvm->srcu); |
| 885 | kvm_arch_free_vm(kvm); |
Peter Zijlstra | 2ecd9d2 | 2015-07-03 18:53:58 +0200 | [diff] [blame] | 886 | preempt_notifier_dec(); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 887 | hardware_disable_all(); |
Avi Kivity | 6d4e4c4 | 2007-11-21 16:41:05 +0200 | [diff] [blame] | 888 | mmdrop(mm); |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 889 | } |
| 890 | |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 891 | void kvm_get_kvm(struct kvm *kvm) |
| 892 | { |
Elena Reshetova | e3736c3 | 2017-02-20 13:06:21 +0200 | [diff] [blame] | 893 | refcount_inc(&kvm->users_count); |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 894 | } |
| 895 | EXPORT_SYMBOL_GPL(kvm_get_kvm); |
| 896 | |
| 897 | void kvm_put_kvm(struct kvm *kvm) |
| 898 | { |
Elena Reshetova | e3736c3 | 2017-02-20 13:06:21 +0200 | [diff] [blame] | 899 | if (refcount_dec_and_test(&kvm->users_count)) |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 900 | kvm_destroy_vm(kvm); |
| 901 | } |
| 902 | EXPORT_SYMBOL_GPL(kvm_put_kvm); |
| 903 | |
Sean Christopherson | 149487b | 2019-10-21 15:58:42 -0700 | [diff] [blame] | 904 | /* |
| 905 | * Used to put a reference that was taken on behalf of an object associated |
| 906 | * with a user-visible file descriptor, e.g. a vcpu or device, if installation |
| 907 | * of the new file descriptor fails and the reference cannot be transferred to |
| 908 | * its final owner. In such cases, the caller is still actively using @kvm and |
| 909 | * will fail miserably if the refcount unexpectedly hits zero. |
| 910 | */ |
| 911 | void kvm_put_kvm_no_destroy(struct kvm *kvm) |
| 912 | { |
| 913 | WARN_ON(refcount_dec_and_test(&kvm->users_count)); |
| 914 | } |
| 915 | EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy); |
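/*
 * Illustrative sketch (not built) of the pattern described above, modeled
 * on vCPU creation: a reference is taken on behalf of the file descriptor
 * before the fd is installed, and dropped with kvm_put_kvm_no_destroy()
 * if installation fails. @r, @vcpu and the error label are assumed from
 * the surrounding function.
 */
#if 0
	kvm_get_kvm(kvm);		/* reference to be owned by the new fd */
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		/* Caller still uses @kvm; the VM must not be destroyed here. */
		kvm_put_kvm_no_destroy(kvm);
		goto unlock_vcpu_destroy;
	}
#endif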
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 916 | |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 917 | static int kvm_vm_release(struct inode *inode, struct file *filp) |
| 918 | { |
| 919 | struct kvm *kvm = filp->private_data; |
| 920 | |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 921 | kvm_irqfd_release(kvm); |
| 922 | |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 923 | kvm_put_kvm(kvm); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 924 | return 0; |
| 925 | } |
| 926 | |
Takuya Yoshikawa | 515a012 | 2010-10-27 18:23:54 +0900 | [diff] [blame] | 927 | /* |
| 928 | * Allocation size is twice as large as the actual dirty bitmap size. |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 929 |  * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
Takuya Yoshikawa | 515a012 | 2010-10-27 18:23:54 +0900 | [diff] [blame] | 930 | */ |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 931 | static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) |
Takuya Yoshikawa | a36a57b1 | 2010-10-27 18:22:19 +0900 | [diff] [blame] | 932 | { |
Takuya Yoshikawa | 515a012 | 2010-10-27 18:23:54 +0900 | [diff] [blame] | 933 | unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); |
Takuya Yoshikawa | a36a57b1 | 2010-10-27 18:22:19 +0900 | [diff] [blame] | 934 | |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 935 | memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT); |
Takuya Yoshikawa | a36a57b1 | 2010-10-27 18:22:19 +0900 | [diff] [blame] | 936 | if (!memslot->dirty_bitmap) |
| 937 | return -ENOMEM; |
| 938 | |
Takuya Yoshikawa | a36a57b1 | 2010-10-27 18:22:19 +0900 | [diff] [blame] | 939 | return 0; |
| 940 | } |
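/*
 * The doubled allocation is split in half: the first half is the dirty
 * bitmap proper, the second half is a transfer buffer used when copying
 * the log out to userspace. For illustration, the helper that addresses
 * the second half (kvm_second_dirty_bitmap() in kvm_host.h, as of this
 * version) amounts to:
 */
#if 0
static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long len = kvm_dirty_bitmap_bytes(memslot);

	return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}
#endif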
| 941 | |
Xiao Guangrong | bf3e05b | 2011-11-24 17:40:57 +0800 | [diff] [blame] | 942 | /* |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 943 | * Delete a memslot by decrementing the number of used slots and shifting all |
| 944 | * other entries in the array forward one spot. |
Xiao Guangrong | bf3e05b | 2011-11-24 17:40:57 +0800 | [diff] [blame] | 945 | */ |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 946 | static inline void kvm_memslot_delete(struct kvm_memslots *slots, |
| 947 | struct kvm_memory_slot *memslot) |
Xiao Guangrong | bf3e05b | 2011-11-24 17:40:57 +0800 | [diff] [blame] | 948 | { |
Igor Mammedov | 063584d | 2014-11-13 23:00:13 +0000 | [diff] [blame] | 949 | struct kvm_memory_slot *mslots = slots->memslots; |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 950 | int i; |
Xiao Guangrong | f85e2cb | 2011-11-24 17:41:54 +0800 | [diff] [blame] | 951 | |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 952 | if (WARN_ON(slots->id_to_index[memslot->id] == -1)) |
| 953 | return; |
Igor Mammedov | 0e60b07 | 2014-12-01 17:29:26 +0000 | [diff] [blame] | 954 | |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 955 | slots->used_slots--; |
| 956 | |
Sean Christopherson | 0774a96 | 2020-03-20 13:55:40 -0700 | [diff] [blame] | 957 | if (atomic_read(&slots->lru_slot) >= slots->used_slots) |
| 958 | atomic_set(&slots->lru_slot, 0); |
| 959 | |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 960 | for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) { |
Igor Mammedov | 7f379cf | 2014-12-01 17:29:24 +0000 | [diff] [blame] | 961 | mslots[i] = mslots[i + 1]; |
| 962 | slots->id_to_index[mslots[i].id] = i; |
Igor Mammedov | 7f379cf | 2014-12-01 17:29:24 +0000 | [diff] [blame] | 963 | } |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 964 | mslots[i] = *memslot; |
| 965 | slots->id_to_index[memslot->id] = -1; |
| 966 | } |
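/*
 * Worked example with hypothetical GFNs: slots are kept sorted by GFN in
 * descending order, so starting from
 *
 *	index:   0           1           2		(used_slots = 3)
 *	slot:    gfn=0x800   gfn=0x400   gfn=0x100
 *
 * deleting the gfn=0x400 slot shifts gfn=0x100 forward into index 1,
 * parks a copy of the deleted slot at index 2 (now beyond used_slots = 2),
 * and sets its id_to_index entry to -1.
 */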
| 967 | |
| 968 | /* |
| 969 | * "Insert" a new memslot by incrementing the number of used slots. Returns |
| 970 | * the new slot's initial index into the memslots array. |
| 971 | */ |
| 972 | static inline int kvm_memslot_insert_back(struct kvm_memslots *slots) |
| 973 | { |
| 974 | return slots->used_slots++; |
| 975 | } |
| 976 | |
| 977 | /* |
| 978 | * Move a changed memslot backwards in the array by shifting existing slots |
| 979 | * with a higher GFN toward the front of the array. Note, the changed memslot |
| 980 | * itself is not preserved in the array, i.e. not swapped at this time, only |
| 981 | * its new index into the array is tracked. Returns the changed memslot's |
| 982 | * current index into the memslots array. |
| 983 | */ |
| 984 | static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, |
| 985 | struct kvm_memory_slot *memslot) |
| 986 | { |
| 987 | struct kvm_memory_slot *mslots = slots->memslots; |
| 988 | int i; |
| 989 | |
| 990 | if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) || |
| 991 | WARN_ON_ONCE(!slots->used_slots)) |
| 992 | return -1; |
Paolo Bonzini | efbeec7 | 2014-12-27 18:01:00 +0100 | [diff] [blame] | 993 | |
| 994 | /* |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 995 | * Move the target memslot backward in the array by shifting existing |
| 996 | * memslots with a higher GFN (than the target memslot) towards the |
| 997 | * front of the array. |
Paolo Bonzini | efbeec7 | 2014-12-27 18:01:00 +0100 | [diff] [blame] | 998 | */ |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 999 | for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) { |
| 1000 | if (memslot->base_gfn > mslots[i + 1].base_gfn) |
| 1001 | break; |
Xiao Guangrong | f85e2cb | 2011-11-24 17:41:54 +0800 | [diff] [blame] | 1002 | |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1003 | WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn); |
| 1004 | |
| 1005 | /* Shift the next memslot forward one and update its index. */ |
| 1006 | mslots[i] = mslots[i + 1]; |
| 1007 | slots->id_to_index[mslots[i].id] = i; |
| 1008 | } |
| 1009 | return i; |
| 1010 | } |
| 1011 | |
| 1012 | /* |
| 1013 | * Move a changed memslot forwards in the array by shifting existing slots with |
| 1014 | * a lower GFN toward the back of the array. Note, the changed memslot itself |
| 1015 | * is not preserved in the array, i.e. not swapped at this time, only its new |
| 1016 | * index into the array is tracked. Returns the changed memslot's final index |
| 1017 | * into the memslots array. |
| 1018 | */ |
| 1019 | static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, |
| 1020 | struct kvm_memory_slot *memslot, |
| 1021 | int start) |
| 1022 | { |
| 1023 | struct kvm_memory_slot *mslots = slots->memslots; |
| 1024 | int i; |
| 1025 | |
| 1026 | for (i = start; i > 0; i--) { |
| 1027 | if (memslot->base_gfn < mslots[i - 1].base_gfn) |
| 1028 | break; |
| 1029 | |
| 1030 | WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn); |
| 1031 | |
| 1032 | /* Shift the next memslot back one and update its index. */ |
| 1033 | mslots[i] = mslots[i - 1]; |
| 1034 | slots->id_to_index[mslots[i].id] = i; |
| 1035 | } |
| 1036 | return i; |
| 1037 | } |
| 1038 | |
| 1039 | /* |
| 1040 | * Re-sort memslots based on their GFN to account for an added, deleted, or |
| 1041 | * moved memslot. Sorting memslots by GFN allows using a binary search during |
| 1042 | * memslot lookup. |
| 1043 | * |
| 1044 | * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! I.e. the entry |
| 1045 | * at memslots[0] has the highest GFN. |
| 1046 | * |
| 1047 | * The sorting algorithm takes advantage of having initially sorted memslots |
| 1048 | * and knowing the position of the changed memslot. Sorting is also optimized |
| 1049 | * by not swapping the updated memslot and instead only shifting other memslots |
| 1050 |  * and tracking the new index for the updated memslot. Only once its final
| 1051 | * index is known is the updated memslot copied into its position in the array. |
| 1052 | * |
| 1053 | * - When deleting a memslot, the deleted memslot simply needs to be moved to |
| 1054 | * the end of the array. |
| 1055 | * |
| 1056 | * - When creating a memslot, the algorithm "inserts" the new memslot at the |
| 1057 |  *   end of the array and then shifts it forward to its correct location.
| 1058 | * |
| 1059 | * - When moving a memslot, the algorithm first moves the updated memslot |
| 1060 | * backward to handle the scenario where the memslot's GFN was changed to a |
| 1061 | * lower value. update_memslots() then falls through and runs the same flow |
| 1062 | * as creating a memslot to move the memslot forward to handle the scenario |
| 1063 | * where its GFN was changed to a higher value. |
| 1064 | * |
| 1065 | * Note, slots are sorted from highest->lowest instead of lowest->highest for |
| 1066 |  * historical reasons. Originally, invalid memslots were denoted by having
| 1067 | * GFN=0, thus sorting from highest->lowest naturally sorted invalid memslots |
| 1068 | * to the end of the array. The current algorithm uses dedicated logic to |
| 1069 | * delete a memslot and thus does not rely on invalid memslots having GFN=0. |
| 1070 | * |
| 1071 |  * The other historical motivation for highest->lowest was to improve the
| 1072 | * performance of memslot lookup. KVM originally used a linear search starting |
| 1073 | * at memslots[0]. On x86, the largest memslot usually has one of the highest, |
| 1074 | * if not *the* highest, GFN, as the bulk of the guest's RAM is located in a |
| 1075 | * single memslot above the 4gb boundary. As the largest memslot is also the |
| 1076 | * most likely to be referenced, sorting it to the front of the array was |
| 1077 | * advantageous. The current binary search starts from the middle of the array |
| 1078 | * and uses an LRU pointer to improve performance for all memslots and GFNs. |
| 1079 | */ |
| 1080 | static void update_memslots(struct kvm_memslots *slots, |
| 1081 | struct kvm_memory_slot *memslot, |
| 1082 | enum kvm_mr_change change) |
| 1083 | { |
| 1084 | int i; |
| 1085 | |
| 1086 | if (change == KVM_MR_DELETE) { |
| 1087 | kvm_memslot_delete(slots, memslot); |
| 1088 | } else { |
| 1089 | if (change == KVM_MR_CREATE) |
| 1090 | i = kvm_memslot_insert_back(slots); |
| 1091 | else |
| 1092 | i = kvm_memslot_move_backward(slots, memslot); |
| 1093 | i = kvm_memslot_move_forward(slots, memslot, i); |
| 1094 | |
| 1095 | /* |
| 1096 | * Copy the memslot to its new position in memslots and update |
| 1097 | * its index accordingly. |
| 1098 | */ |
| 1099 | slots->memslots[i] = *memslot; |
| 1100 | slots->id_to_index[memslot->id] = i; |
| 1101 | } |
Xiao Guangrong | bf3e05b | 2011-11-24 17:40:57 +0800 | [diff] [blame] | 1102 | } |
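/*
 * Worked example for KVM_MR_CREATE with hypothetical GFNs: starting from
 *
 *	index:   0           1				(used_slots = 2)
 *	slot:    gfn=0x800   gfn=0x100
 *
 * creating a slot with gfn=0x400 "inserts" it at index 2 (used_slots
 * becomes 3), then kvm_memslot_move_forward() shifts gfn=0x100 back to
 * index 2 and returns 1, where the new slot is finally copied.
 */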
| 1103 | |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1104 | static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem) |
Xiao Guangrong | a50d64d | 2012-08-21 10:58:13 +0800 | [diff] [blame] | 1105 | { |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 1106 | u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; |
| 1107 | |
Christoffer Dall | 0f8a4de | 2014-08-26 14:00:37 +0200 | [diff] [blame] | 1108 | #ifdef __KVM_HAVE_READONLY_MEM |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 1109 | valid_flags |= KVM_MEM_READONLY; |
| 1110 | #endif |
| 1111 | |
| 1112 | if (mem->flags & ~valid_flags) |
Xiao Guangrong | a50d64d | 2012-08-21 10:58:13 +0800 | [diff] [blame] | 1113 | return -EINVAL; |
| 1114 | |
| 1115 | return 0; |
| 1116 | } |
| 1117 | |
Gleb Natapov | 7ec4fb4 | 2012-12-24 17:49:30 +0200 | [diff] [blame] | 1118 | static struct kvm_memslots *install_new_memslots(struct kvm *kvm, |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1119 | int as_id, struct kvm_memslots *slots) |
Gleb Natapov | 7ec4fb4 | 2012-12-24 17:49:30 +0200 | [diff] [blame] | 1120 | { |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1121 | struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); |
Sean Christopherson | 361209e | 2019-02-05 13:01:14 -0800 | [diff] [blame] | 1122 | u64 gen = old_memslots->generation; |
Gleb Natapov | 7ec4fb4 | 2012-12-24 17:49:30 +0200 | [diff] [blame] | 1123 | |
Sean Christopherson | 361209e | 2019-02-05 13:01:14 -0800 | [diff] [blame] | 1124 | WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); |
| 1125 | slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; |
David Matlack | ee3d157 | 2014-08-18 15:46:06 -0700 | [diff] [blame] | 1126 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1127 | rcu_assign_pointer(kvm->memslots[as_id], slots); |
Gleb Natapov | 7ec4fb4 | 2012-12-24 17:49:30 +0200 | [diff] [blame] | 1128 | synchronize_srcu_expedited(&kvm->srcu); |
Takuya Yoshikawa | e59dbe0 | 2013-07-04 13:40:29 +0900 | [diff] [blame] | 1129 | |
David Matlack | ee3d157 | 2014-08-18 15:46:06 -0700 | [diff] [blame] | 1130 | /* |
Sean Christopherson | 361209e | 2019-02-05 13:01:14 -0800 | [diff] [blame] | 1131 | * Increment the new memslot generation a second time, dropping the |
Miaohe Lin | 0011679 | 2019-12-11 14:26:23 +0800 | [diff] [blame] | 1132 | * update in-progress flag and incrementing the generation based on |
Sean Christopherson | 361209e | 2019-02-05 13:01:14 -0800 | [diff] [blame] | 1133 | * the number of address spaces. This provides a unique and easily |
| 1134 | * identifiable generation number while the memslots are in flux. |
| 1135 | */ |
| 1136 | gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; |
| 1137 | |
| 1138 | /* |
Paolo Bonzini | 4bd518f | 2017-02-03 20:44:51 -0800 | [diff] [blame] | 1139 | * Generations must be unique even across address spaces. We do not need |
| 1140 |  * a global counter for that; instead, the generation space is evenly split
| 1141 | * across address spaces. For example, with two address spaces, address |
Sean Christopherson | 164bf7e | 2019-02-05 13:01:18 -0800 | [diff] [blame] | 1142 | * space 0 will use generations 0, 2, 4, ... while address space 1 will |
| 1143 | * use generations 1, 3, 5, ... |
David Matlack | ee3d157 | 2014-08-18 15:46:06 -0700 | [diff] [blame] | 1144 | */ |
Sean Christopherson | 164bf7e | 2019-02-05 13:01:18 -0800 | [diff] [blame] | 1145 | gen += KVM_ADDRESS_SPACE_NUM; |
David Matlack | ee3d157 | 2014-08-18 15:46:06 -0700 | [diff] [blame] | 1146 | |
Sean Christopherson | 1524825 | 2019-02-05 12:54:17 -0800 | [diff] [blame] | 1147 | kvm_arch_memslots_updated(kvm, gen); |
| 1148 | |
| 1149 | slots->generation = gen; |
Takuya Yoshikawa | e59dbe0 | 2013-07-04 13:40:29 +0900 | [diff] [blame] | 1150 | |
| 1151 | return old_memslots; |
Gleb Natapov | 7ec4fb4 | 2012-12-24 17:49:30 +0200 | [diff] [blame] | 1152 | } |
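/*
 * Illustrative generation arithmetic, assuming KVM_ADDRESS_SPACE_NUM == 2
 * and address space 0 currently at generation 4: while the update is in
 * flight the published generation is (4 | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
 * on completion it becomes 6. Address space 0 thus stays on even
 * generations and address space 1 on odd ones, so no two address spaces
 * ever share a generation number.
 */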
| 1153 | |
Sean Christopherson | 3694725 | 2020-02-18 13:07:32 -0800 | [diff] [blame] | 1154 | /* |
| 1155 | * Note, at a minimum, the current number of used slots must be allocated, even |
| 1156 | * when deleting a memslot, as we need a complete duplicate of the memslots for |
| 1157 | * use when invalidating a memslot prior to deleting/moving the memslot. |
| 1158 | */ |
| 1159 | static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, |
| 1160 | enum kvm_mr_change change) |
| 1161 | { |
| 1162 | struct kvm_memslots *slots; |
| 1163 | size_t old_size, new_size; |
| 1164 | |
| 1165 | old_size = sizeof(struct kvm_memslots) + |
| 1166 | (sizeof(struct kvm_memory_slot) * old->used_slots); |
| 1167 | |
| 1168 | if (change == KVM_MR_CREATE) |
| 1169 | new_size = old_size + sizeof(struct kvm_memory_slot); |
| 1170 | else |
| 1171 | new_size = old_size; |
| 1172 | |
| 1173 | slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT); |
| 1174 | if (likely(slots)) |
| 1175 | memcpy(slots, old, old_size); |
| 1176 | |
| 1177 | return slots; |
| 1178 | } |
| 1179 | |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1180 | static int kvm_set_memslot(struct kvm *kvm, |
| 1181 | const struct kvm_userspace_memory_region *mem, |
Sean Christopherson | 9d4c197 | 2020-02-18 13:07:24 -0800 | [diff] [blame] | 1182 | struct kvm_memory_slot *old, |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1183 | struct kvm_memory_slot *new, int as_id, |
| 1184 | enum kvm_mr_change change) |
| 1185 | { |
| 1186 | struct kvm_memory_slot *slot; |
| 1187 | struct kvm_memslots *slots; |
| 1188 | int r; |
| 1189 | |
Sean Christopherson | 3694725 | 2020-02-18 13:07:32 -0800 | [diff] [blame] | 1190 | slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change); |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1191 | if (!slots) |
| 1192 | return -ENOMEM; |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1193 | |
| 1194 | if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { |
| 1195 | /* |
| 1196 | * Note, the INVALID flag needs to be in the appropriate entry |
| 1197 | * in the freshly allocated memslots, not in @old or @new. |
| 1198 | */ |
| 1199 | slot = id_to_memslot(slots, old->id); |
| 1200 | slot->flags |= KVM_MEMSLOT_INVALID; |
| 1201 | |
| 1202 | /* |
| 1203 |  * We can re-use the old memslots; the only difference from the
| 1204 | * newly installed memslots is the invalid flag, which will get |
| 1205 | * dropped by update_memslots anyway. We'll also revert to the |
| 1206 | * old memslots if preparing the new memory region fails. |
| 1207 | */ |
| 1208 | slots = install_new_memslots(kvm, as_id, slots); |
| 1209 | |
| 1210 | 		/* From this point, no new shadow pages pointing to a deleted
| 1211 | 		 * or moved memslot will be created.
| 1212 | * |
| 1213 | * validation of sp->gfn happens in: |
| 1214 | * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) |
| 1215 | * - kvm_is_visible_gfn (mmu_check_root) |
| 1216 | */ |
| 1217 | kvm_arch_flush_shadow_memslot(kvm, slot); |
| 1218 | } |
| 1219 | |
| 1220 | r = kvm_arch_prepare_memory_region(kvm, new, mem, change); |
| 1221 | if (r) |
| 1222 | goto out_slots; |
| 1223 | |
| 1224 | update_memslots(slots, new, change); |
| 1225 | slots = install_new_memslots(kvm, as_id, slots); |
| 1226 | |
| 1227 | kvm_arch_commit_memory_region(kvm, mem, old, new, change); |
| 1228 | |
| 1229 | kvfree(slots); |
| 1230 | return 0; |
| 1231 | |
| 1232 | out_slots: |
| 1233 | if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) |
| 1234 | slots = install_new_memslots(kvm, as_id, slots); |
| 1235 | kvfree(slots); |
| 1236 | return r; |
| 1237 | } |
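/*
 * Recap of the sequence above for DELETE/MOVE:
 *
 *	1. Duplicate the memslots and mark the target slot
 *	   KVM_MEMSLOT_INVALID in the copy.
 *	2. install_new_memslots() publishes the copy and waits for SRCU
 *	   readers, so no reader can still see the slot as valid.
 *	3. kvm_arch_flush_shadow_memslot() drops shadow pages for the slot.
 *	4. Prepare the new region, update_memslots(), publish again, and
 *	   commit via kvm_arch_commit_memory_region().
 */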
| 1238 | |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1239 | static int kvm_delete_memslot(struct kvm *kvm, |
| 1240 | const struct kvm_userspace_memory_region *mem, |
| 1241 | struct kvm_memory_slot *old, int as_id) |
| 1242 | { |
| 1243 | struct kvm_memory_slot new; |
| 1244 | int r; |
| 1245 | |
| 1246 | if (!old->npages) |
| 1247 | return -EINVAL; |
| 1248 | |
| 1249 | memset(&new, 0, sizeof(new)); |
| 1250 | new.id = old->id; |
Peter Xu | 9e9eb22 | 2020-10-14 11:26:46 -0700 | [diff] [blame] | 1251 | /* |
| 1252 |  * This is only for debugging purposes; it should never be referenced
| 1253 | * for a removed memslot. |
| 1254 | */ |
| 1255 | new.as_id = as_id; |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1256 | |
| 1257 | r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE); |
| 1258 | if (r) |
| 1259 | return r; |
| 1260 | |
Sean Christopherson | e96c81e | 2020-02-18 13:07:27 -0800 | [diff] [blame] | 1261 | kvm_free_memslot(kvm, old); |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1262 | return 0; |
| 1263 | } |
| 1264 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1265 | /* |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1266 | * Allocate some memory and give it an address in the guest physical address |
| 1267 | * space. |
| 1268 | * |
| 1269 | * Discontiguous memory is allowed, mostly for framebuffers. |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1270 | * |
Dominik Dingel | 02d5d55 | 2014-10-27 16:22:56 +0100 | [diff] [blame] | 1271 | * Must be called holding kvm->slots_lock for write. |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1272 | */ |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1273 | int __kvm_set_memory_region(struct kvm *kvm, |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1274 | const struct kvm_userspace_memory_region *mem) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1275 | { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1276 | struct kvm_memory_slot old, new; |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1277 | struct kvm_memory_slot *tmp; |
Takuya Yoshikawa | f64c039 | 2013-01-29 11:00:07 +0900 | [diff] [blame] | 1278 | enum kvm_mr_change change; |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1279 | int as_id, id; |
| 1280 | int r; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1281 | |
Xiao Guangrong | a50d64d | 2012-08-21 10:58:13 +0800 | [diff] [blame] | 1282 | r = check_memory_region_flags(mem); |
| 1283 | if (r) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1284 | return r; |
Xiao Guangrong | a50d64d | 2012-08-21 10:58:13 +0800 | [diff] [blame] | 1285 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1286 | as_id = mem->slot >> 16; |
| 1287 | id = (u16)mem->slot; |
| 1288 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1289 | /* General sanity checks */ |
| 1290 | if (mem->memory_size & (PAGE_SIZE - 1)) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1291 | return -EINVAL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1292 | if (mem->guest_phys_addr & (PAGE_SIZE - 1)) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1293 | return -EINVAL; |
Takuya Yoshikawa | fa3d315 | 2011-05-07 16:35:38 +0900 | [diff] [blame] | 1294 | /* We can read the guest memory with __xxx_user() later on. */ |
Paolo Bonzini | 09d952c | 2020-06-01 04:17:45 -0400 | [diff] [blame] | 1295 | if ((mem->userspace_addr & (PAGE_SIZE - 1)) || |
Linus Torvalds | 96d4f26 | 2019-01-03 18:57:57 -0800 | [diff] [blame] | 1296 | !access_ok((void __user *)(unsigned long)mem->userspace_addr, |
Paolo Bonzini | 09d952c | 2020-06-01 04:17:45 -0400 | [diff] [blame] | 1297 | mem->memory_size)) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1298 | return -EINVAL; |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1299 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1300 | return -EINVAL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1301 | if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1302 | return -EINVAL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1303 | |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1304 | /* |
| 1305 |  * Make a full copy of the old memslot; the pointer will become stale
| 1306 | * when the memslots are re-sorted by update_memslots(), and the old |
| 1307 | * memslot needs to be referenced after calling update_memslots(), e.g. |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1308 | * to free its resources and for arch specific behavior. |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1309 | */ |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1310 | tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id); |
| 1311 | if (tmp) { |
| 1312 | old = *tmp; |
| 1313 | tmp = NULL; |
| 1314 | } else { |
| 1315 | memset(&old, 0, sizeof(old)); |
| 1316 | old.id = id; |
| 1317 | } |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1318 | |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1319 | if (!mem->memory_size) |
| 1320 | return kvm_delete_memslot(kvm, mem, &old, as_id); |
| 1321 | |
Peter Xu | 9e9eb22 | 2020-10-14 11:26:46 -0700 | [diff] [blame] | 1322 | new.as_id = as_id; |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1323 | new.id = id; |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1324 | new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; |
| 1325 | new.npages = mem->memory_size >> PAGE_SHIFT; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1326 | new.flags = mem->flags; |
Sean Christopherson | 414de7ab | 2020-02-18 13:07:20 -0800 | [diff] [blame] | 1327 | new.userspace_addr = mem->userspace_addr; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1328 | |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1329 | if (new.npages > KVM_MEM_MAX_NR_PAGES) |
| 1330 | return -EINVAL; |
| 1331 | |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1332 | if (!old.npages) { |
| 1333 | change = KVM_MR_CREATE; |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1334 | new.dirty_bitmap = NULL; |
| 1335 | memset(&new.arch, 0, sizeof(new.arch)); |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1336 | } else { /* Modify an existing slot. */ |
| 1337 | if ((new.userspace_addr != old.userspace_addr) || |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1338 | (new.npages != old.npages) || |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1339 | ((new.flags ^ old.flags) & KVM_MEM_READONLY)) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1340 | return -EINVAL; |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1341 | |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1342 | if (new.base_gfn != old.base_gfn) |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1343 | change = KVM_MR_MOVE; |
| 1344 | else if (new.flags != old.flags) |
| 1345 | change = KVM_MR_FLAGS_ONLY; |
| 1346 | else /* Nothing to change. */ |
| 1347 | return 0; |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1348 | |
| 1349 | /* Copy dirty_bitmap and arch from the current memslot. */ |
| 1350 | new.dirty_bitmap = old.dirty_bitmap; |
| 1351 | memcpy(&new.arch, &old.arch, sizeof(new.arch)); |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1352 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1353 | |
Takuya Yoshikawa | f64c039 | 2013-01-29 11:00:07 +0900 | [diff] [blame] | 1354 | if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { |
Takuya Yoshikawa | 0a706be | 2013-01-11 18:26:55 +0900 | [diff] [blame] | 1355 | /* Check for overlaps */ |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1356 | kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) { |
| 1357 | if (tmp->id == id) |
Takuya Yoshikawa | 0a706be | 2013-01-11 18:26:55 +0900 | [diff] [blame] | 1358 | continue; |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1359 | if (!((new.base_gfn + new.npages <= tmp->base_gfn) || |
| 1360 | (new.base_gfn >= tmp->base_gfn + tmp->npages))) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1361 | return -EEXIST; |
Takuya Yoshikawa | 0a706be | 2013-01-11 18:26:55 +0900 | [diff] [blame] | 1362 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1363 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1364 | |
Sean Christopherson | 414de7ab | 2020-02-18 13:07:20 -0800 | [diff] [blame] | 1365 | /* Allocate/free page dirty bitmap as needed */ |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1366 | if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) |
Al Viro | 8b6d44c | 2007-02-09 16:38:40 +0000 | [diff] [blame] | 1367 | new.dirty_bitmap = NULL; |
Sean Christopherson | 414de7ab | 2020-02-18 13:07:20 -0800 | [diff] [blame] | 1368 | else if (!new.dirty_bitmap) { |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 1369 | r = kvm_alloc_dirty_bitmap(&new); |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1370 | if (r) |
| 1371 | return r; |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 1372 | |
| 1373 | if (kvm_dirty_log_manual_protect_and_init_set(kvm)) |
| 1374 | bitmap_set(new.dirty_bitmap, 0, new.npages); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1375 | } |
| 1376 | |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1377 | r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change); |
| 1378 | if (r) |
| 1379 | goto out_bitmap; |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 1380 | |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1381 | if (old.dirty_bitmap && !new.dirty_bitmap) |
| 1382 | kvm_destroy_dirty_bitmap(&old); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1383 | return 0; |
| 1384 | |
Sean Christopherson | bd0e96f | 2020-02-18 13:07:21 -0800 | [diff] [blame] | 1385 | out_bitmap: |
| 1386 | if (new.dirty_bitmap && !old.dirty_bitmap) |
| 1387 | kvm_destroy_dirty_bitmap(&new); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1388 | return r; |
Izik Eidus | 210c7c4 | 2007-10-24 23:52:57 +0200 | [diff] [blame] | 1389 | } |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1390 | EXPORT_SYMBOL_GPL(__kvm_set_memory_region); |
| 1391 | |
| 1392 | int kvm_set_memory_region(struct kvm *kvm, |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1393 | const struct kvm_userspace_memory_region *mem) |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1394 | { |
| 1395 | int r; |
| 1396 | |
Marcelo Tosatti | 79fac95 | 2009-12-23 14:35:26 -0200 | [diff] [blame] | 1397 | mutex_lock(&kvm->slots_lock); |
Takuya Yoshikawa | 47ae31e | 2013-02-27 19:43:00 +0900 | [diff] [blame] | 1398 | r = __kvm_set_memory_region(kvm, mem); |
Marcelo Tosatti | 79fac95 | 2009-12-23 14:35:26 -0200 | [diff] [blame] | 1399 | mutex_unlock(&kvm->slots_lock); |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1400 | return r; |
| 1401 | } |
Izik Eidus | 210c7c4 | 2007-10-24 23:52:57 +0200 | [diff] [blame] | 1402 | EXPORT_SYMBOL_GPL(kvm_set_memory_region); |
| 1403 | |
Stephen Hemminger | 7940876 | 2013-12-29 12:12:29 -0800 | [diff] [blame] | 1404 | static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, |
| 1405 | struct kvm_userspace_memory_region *mem) |
Izik Eidus | 210c7c4 | 2007-10-24 23:52:57 +0200 | [diff] [blame] | 1406 | { |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1407 | if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) |
Izik Eidus | e0d62c7 | 2007-10-24 23:57:46 +0200 | [diff] [blame] | 1408 | return -EINVAL; |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1409 | |
Takuya Yoshikawa | 47ae31e | 2013-02-27 19:43:00 +0900 | [diff] [blame] | 1410 | return kvm_set_memory_region(kvm, mem); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1411 | } |
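/*
 * Userspace-side illustration (not part of this file) of driving the ioctl
 * handled above, including the address-space encoding in the slot field.
 * The fd, addresses, and sizes are assumptions of the sketch.
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_region(int vm_fd, void *host_mem)
{
	struct kvm_userspace_memory_region region = {
		/* bits 16-31 select the address space, bits 0-15 the slot id */
		.slot = (0 << 16) | 0,
		.flags = KVM_MEM_LOG_DIRTY_PAGES,
		.guest_phys_addr = 0x100000000ULL,	/* above 4GiB */
		.memory_size = 64 << 20,		/* 64 MiB, page aligned */
		.userspace_addr = (unsigned long)host_mem,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
#endif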
| 1412 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1413 | #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1414 | /** |
| 1415 | * kvm_get_dirty_log - get a snapshot of dirty pages |
| 1416 | * @kvm: pointer to kvm instance |
| 1417 | * @log: slot id and address to which we copy the log |
| 1418 | * @is_dirty: set to '1' if any dirty pages were found |
| 1419 | * @memslot: set to the associated memslot, always valid on success |
| 1420 | */ |
| 1421 | int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, |
| 1422 | int *is_dirty, struct kvm_memory_slot **memslot) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1423 | { |
Paolo Bonzini | 9f6b802 | 2015-05-17 16:20:07 +0200 | [diff] [blame] | 1424 | struct kvm_memslots *slots; |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1425 | int i, as_id, id; |
Takuya Yoshikawa | 87bf6e7 | 2010-04-12 19:35:35 +0900 | [diff] [blame] | 1426 | unsigned long n; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1427 | unsigned long any = 0; |
| 1428 | |
Peter Xu | b2cc64c | 2020-09-30 21:22:24 -0400 | [diff] [blame^] | 1429 | 	/* Dirty ring tracking is mutually exclusive with dirty log tracking */
| 1430 | if (kvm->dirty_ring_size) |
| 1431 | return -ENXIO; |
| 1432 | |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1433 | *memslot = NULL; |
| 1434 | *is_dirty = 0; |
| 1435 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1436 | as_id = log->slot >> 16; |
| 1437 | id = (u16)log->slot; |
| 1438 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1439 | return -EINVAL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1440 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1441 | slots = __kvm_memslots(kvm, as_id); |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1442 | *memslot = id_to_memslot(slots, id); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1443 | if (!(*memslot) || !(*memslot)->dirty_bitmap) |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1444 | return -ENOENT; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1445 | |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1446 | kvm_arch_sync_dirty_log(kvm, *memslot); |
| 1447 | |
| 1448 | n = kvm_dirty_bitmap_bytes(*memslot); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1449 | |
Uri Lublin | cd1a4a9 | 2007-02-22 16:43:09 +0200 | [diff] [blame] | 1450 | for (i = 0; !any && i < n/sizeof(long); ++i) |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1451 | any = (*memslot)->dirty_bitmap[i]; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1452 | |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1453 | if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n)) |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1454 | return -EFAULT; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1455 | |
Zhang Xiantao | 5bb064d | 2007-11-18 20:29:43 +0800 | [diff] [blame] | 1456 | if (any) |
| 1457 | *is_dirty = 1; |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1458 | return 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1459 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 1460 | EXPORT_SYMBOL_GPL(kvm_get_dirty_log); |
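/*
 * Userspace-side illustration (not part of this file) of the ioctl served
 * by the path above: fetch the dirty log for slot 0. @vm_fd and the 64 MiB
 * slot size are assumptions carried over from the earlier sketch; the
 * bitmap needs one bit per page of the memslot.
 */
#if 0
	unsigned long bitmap[(64 << 20) / 4096 / (8 * sizeof(unsigned long))];
	struct kvm_dirty_log log = {
		.slot = 0,			/* as_id 0, slot id 0 */
		.dirty_bitmap = bitmap,
	};

	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
		perror("KVM_GET_DIRTY_LOG");
#endif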
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1461 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1462 | #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1463 | /** |
Jiang Biao | b8b0022 | 2019-04-23 19:40:30 +0800 | [diff] [blame] | 1464 | * kvm_get_dirty_log_protect - get a snapshot of dirty pages |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1465 | * and reenable dirty page tracking for the corresponding pages. |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1466 | * @kvm: pointer to kvm instance |
| 1467 | * @log: slot id and address to which we copy the log |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1468 | * |
| 1469 |  * Keep in mind that VCPU threads can write to the bitmap concurrently.
| 1470 |  * To avoid losing track of dirty pages, we keep the
| 1471 | * following order: |
| 1472 | * |
| 1473 | * 1. Take a snapshot of the bit and clear it if needed. |
| 1474 | * 2. Write protect the corresponding page. |
| 1475 | * 3. Copy the snapshot to the userspace. |
| 1476 | * 4. Upon return caller flushes TLB's if needed. |
| 1477 | * |
| 1478 | * Between 2 and 4, the guest may write to the page using the remaining TLB |
| 1479 | * entry. This is not a problem because the page is reported dirty using |
| 1480 | * the snapshot taken before and step 4 ensures that writes done after |
| 1481 | * exiting to userspace will be logged for the next call. |
| 1482 | * |
| 1483 | */ |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1484 | static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1485 | { |
Paolo Bonzini | 9f6b802 | 2015-05-17 16:20:07 +0200 | [diff] [blame] | 1486 | struct kvm_memslots *slots; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1487 | struct kvm_memory_slot *memslot; |
Markus Elfring | 58d6db3 | 2017-01-22 17:30:16 +0100 | [diff] [blame] | 1488 | int i, as_id, id; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1489 | unsigned long n; |
| 1490 | unsigned long *dirty_bitmap; |
| 1491 | unsigned long *dirty_bitmap_buffer; |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1492 | bool flush; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1493 | |
Peter Xu | b2cc64c | 2020-09-30 21:22:24 -0400 | [diff] [blame^] | 1494 | 	/* Dirty ring tracking is mutually exclusive with dirty log tracking */
| 1495 | if (kvm->dirty_ring_size) |
| 1496 | return -ENXIO; |
| 1497 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1498 | as_id = log->slot >> 16; |
| 1499 | id = (u16)log->slot; |
| 1500 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) |
Markus Elfring | 58d6db3 | 2017-01-22 17:30:16 +0100 | [diff] [blame] | 1501 | return -EINVAL; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1502 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1503 | slots = __kvm_memslots(kvm, as_id); |
| 1504 | memslot = id_to_memslot(slots, id); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1505 | if (!memslot || !memslot->dirty_bitmap) |
| 1506 | return -ENOENT; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1507 | |
| 1508 | dirty_bitmap = memslot->dirty_bitmap; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1509 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1510 | kvm_arch_sync_dirty_log(kvm, memslot); |
| 1511 | |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1512 | n = kvm_dirty_bitmap_bytes(memslot); |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1513 | flush = false; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1514 | if (kvm->manual_dirty_log_protect) { |
| 1515 | /* |
| 1516 | 		 * Unlike the non-manual path below, flush stays false here,
| 1517 | 		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There
| 1518 | 		 * is some code duplication between this function and
| 1519 | 		 * kvm_get_dirty_log, but hopefully all architectures
| 1520 | 		 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
| 1521 | 		 * can be eliminated.
| 1522 | */ |
| 1523 | dirty_bitmap_buffer = dirty_bitmap; |
| 1524 | } else { |
| 1525 | dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); |
| 1526 | memset(dirty_bitmap_buffer, 0, n); |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1527 | |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1528 | spin_lock(&kvm->mmu_lock); |
| 1529 | for (i = 0; i < n / sizeof(long); i++) { |
| 1530 | unsigned long mask; |
| 1531 | gfn_t offset; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1532 | |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1533 | if (!dirty_bitmap[i]) |
| 1534 | continue; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1535 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1536 | flush = true; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1537 | mask = xchg(&dirty_bitmap[i], 0); |
| 1538 | dirty_bitmap_buffer[i] = mask; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1539 | |
Lan Tianyu | a67794c | 2019-02-02 17:20:27 +0800 | [diff] [blame] | 1540 | offset = i * BITS_PER_LONG; |
| 1541 | kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, |
| 1542 | offset, mask); |
Takuya Yoshikawa | 58d2930 | 2015-03-17 16:19:58 +0900 | [diff] [blame] | 1543 | } |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1544 | spin_unlock(&kvm->mmu_lock); |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1545 | } |
| 1546 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1547 | if (flush) |
| 1548 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
| 1549 | |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1550 | if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) |
Markus Elfring | 58d6db3 | 2017-01-22 17:30:16 +0100 | [diff] [blame] | 1551 | return -EFAULT; |
| 1552 | return 0; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1553 | } |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1554 | 
| 1556 | /** |
| 1557 | * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot |
| 1558 | * @kvm: kvm instance |
| 1559 | * @log: slot id and address to which we copy the log |
| 1560 | * |
| 1561 | * Steps 1-4 below provide general overview of dirty page logging. See |
| 1562 | * kvm_get_dirty_log_protect() function description for additional details. |
| 1563 | * |
| 1564 |  * We call kvm_get_dirty_log_protect() to handle steps 1-3; it also flushes
| 1565 |  * the TLB (step 4) when needed, even if a previous step failed and the
| 1566 |  * dirty bitmap may be corrupt. Regardless of the previous outcome, the KVM
| 1567 |  * logging API does not preclude a subsequent user space dirty log read.
| 1568 |  * Flushing the TLB ensures writes will be marked dirty for the next log read.
| 1569 | * |
| 1570 | * 1. Take a snapshot of the bit and clear it if needed. |
| 1571 | * 2. Write protect the corresponding page. |
| 1572 | * 3. Copy the snapshot to the userspace. |
| 1573 | * 4. Flush TLB's if needed. |
| 1574 | */ |
| 1575 | static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, |
| 1576 | struct kvm_dirty_log *log) |
| 1577 | { |
| 1578 | int r; |
| 1579 | |
| 1580 | mutex_lock(&kvm->slots_lock); |
| 1581 | |
| 1582 | r = kvm_get_dirty_log_protect(kvm, log); |
| 1583 | |
| 1584 | mutex_unlock(&kvm->slots_lock); |
| 1585 | return r; |
| 1586 | } |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1587 | |
| 1588 | /** |
| 1589 | * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap |
| 1590 | * and reenable dirty page tracking for the corresponding pages. |
| 1591 | * @kvm: pointer to kvm instance |
| 1592 | * @log: slot id and address from which to fetch the bitmap of dirty pages |
| 1593 | */ |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1594 | static int kvm_clear_dirty_log_protect(struct kvm *kvm, |
| 1595 | struct kvm_clear_dirty_log *log) |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1596 | { |
| 1597 | struct kvm_memslots *slots; |
| 1598 | struct kvm_memory_slot *memslot; |
Tomas Bortoli | 98938aa | 2019-01-02 18:29:37 +0100 | [diff] [blame] | 1599 | int as_id, id; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1600 | gfn_t offset; |
Tomas Bortoli | 98938aa | 2019-01-02 18:29:37 +0100 | [diff] [blame] | 1601 | unsigned long i, n; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1602 | unsigned long *dirty_bitmap; |
| 1603 | unsigned long *dirty_bitmap_buffer; |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1604 | bool flush; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1605 | |
Peter Xu | b2cc64c | 2020-09-30 21:22:24 -0400 | [diff] [blame^] | 1606 | 	/* Dirty ring tracking is mutually exclusive with dirty log tracking */
| 1607 | if (kvm->dirty_ring_size) |
| 1608 | return -ENXIO; |
| 1609 | |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1610 | as_id = log->slot >> 16; |
| 1611 | id = (u16)log->slot; |
| 1612 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) |
| 1613 | return -EINVAL; |
| 1614 | |
Paolo Bonzini | 76d58e0 | 2019-04-17 15:28:44 +0200 | [diff] [blame] | 1615 | if (log->first_page & 63) |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1616 | return -EINVAL; |
| 1617 | |
| 1618 | slots = __kvm_memslots(kvm, as_id); |
| 1619 | memslot = id_to_memslot(slots, id); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1620 | if (!memslot || !memslot->dirty_bitmap) |
| 1621 | return -ENOENT; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1622 | |
| 1623 | dirty_bitmap = memslot->dirty_bitmap; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1624 | |
Peter Xu | 4ddc920 | 2019-05-08 17:15:45 +0800 | [diff] [blame] | 1625 | n = ALIGN(log->num_pages, BITS_PER_LONG) / 8; |
Tomas Bortoli | 98938aa | 2019-01-02 18:29:37 +0100 | [diff] [blame] | 1626 | |
| 1627 | if (log->first_page > memslot->npages || |
Paolo Bonzini | 76d58e0 | 2019-04-17 15:28:44 +0200 | [diff] [blame] | 1628 | log->num_pages > memslot->npages - log->first_page || |
| 1629 | (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) |
| 1630 | return -EINVAL; |
Tomas Bortoli | 98938aa | 2019-01-02 18:29:37 +0100 | [diff] [blame] | 1631 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1632 | kvm_arch_sync_dirty_log(kvm, memslot); |
| 1633 | |
| 1634 | flush = false; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1635 | dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); |
| 1636 | if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) |
| 1637 | return -EFAULT; |
| 1638 | |
| 1639 | spin_lock(&kvm->mmu_lock); |
Peter Xu | 53eac7a | 2019-05-08 17:15:46 +0800 | [diff] [blame] | 1640 | for (offset = log->first_page, i = offset / BITS_PER_LONG, |
| 1641 | n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1642 | i++, offset += BITS_PER_LONG) { |
| 1643 | unsigned long mask = *dirty_bitmap_buffer++; |
| 1644 | atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i]; |
| 1645 | if (!mask) |
| 1646 | continue; |
| 1647 | |
| 1648 | mask &= atomic_long_fetch_andnot(mask, p); |
| 1649 | |
| 1650 | /* |
| 1651 | * mask contains the bits that really have been cleared. This |
| 1652 | * never includes any bits beyond the length of the memslot (if |
| 1653 | * the length is not aligned to 64 pages), therefore it is not |
| 1654 | * a problem if userspace sets them in log->dirty_bitmap. |
| 1655 | */ |
| 1656 | if (mask) { |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1657 | flush = true; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1658 | kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, |
| 1659 | offset, mask); |
| 1660 | } |
| 1661 | } |
| 1662 | spin_unlock(&kvm->mmu_lock); |
| 1663 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1664 | if (flush) |
| 1665 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
| 1666 | |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1667 | return 0; |
| 1668 | } |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1669 | |
| 1670 | static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, |
| 1671 | struct kvm_clear_dirty_log *log) |
| 1672 | { |
| 1673 | int r; |
| 1674 | |
| 1675 | mutex_lock(&kvm->slots_lock); |
| 1676 | |
| 1677 | r = kvm_clear_dirty_log_protect(kvm, log); |
| 1678 | |
| 1679 | mutex_unlock(&kvm->slots_lock); |
| 1680 | return r; |
| 1681 | } |
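/*
 * Userspace-side illustration (not part of this file) of the manual-protect
 * counterpart served above: clear (i.e. write-protect again) the first 256
 * pages of slot 0. first_page must be 64-aligned, and num_pages a multiple
 * of 64 unless the range runs to the end of the memslot. @vm_fd is an
 * assumption of the sketch.
 */
#if 0
	__u64 mask[4] = { ~0ULL, ~0ULL, ~0ULL, ~0ULL };	/* 256 bits set */
	struct kvm_clear_dirty_log clear = {
		.slot = 0,
		.first_page = 0,
		.num_pages = 256,
		.dirty_bitmap = mask,
	};

	if (ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear) < 0)
		perror("KVM_CLEAR_DIRTY_LOG");
#endif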
| 1682 | #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1683 | |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 1684 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) |
| 1685 | { |
| 1686 | return __gfn_to_memslot(kvm_memslots(kvm), gfn); |
| 1687 | } |
Avi Kivity | a1f4d395 | 2010-06-21 11:44:20 +0300 | [diff] [blame] | 1688 | EXPORT_SYMBOL_GPL(gfn_to_memslot); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1689 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 1690 | struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 1691 | { |
| 1692 | return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn); |
| 1693 | } |
Paolo Bonzini | e72436b | 2020-04-17 12:21:06 -0400 | [diff] [blame] | 1694 | EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot); |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 1695 | |
Yaowei Bai | 33e9415 | 2015-11-14 11:21:06 +0800 | [diff] [blame] | 1696 | bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) |
Izik Eidus | e0d62c7 | 2007-10-24 23:57:46 +0200 | [diff] [blame] | 1697 | { |
Xiao Guangrong | bf3e05b | 2011-11-24 17:40:57 +0800 | [diff] [blame] | 1698 | struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); |
Izik Eidus | e0d62c7 | 2007-10-24 23:57:46 +0200 | [diff] [blame] | 1699 | |
Paolo Bonzini | c36b715 | 2020-04-16 09:48:07 -0400 | [diff] [blame] | 1700 | return kvm_is_visible_memslot(memslot); |
Izik Eidus | e0d62c7 | 2007-10-24 23:57:46 +0200 | [diff] [blame] | 1701 | } |
| 1702 | EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); |
| 1703 | |
Vitaly Kuznetsov | 995decb | 2020-07-08 16:00:23 +0200 | [diff] [blame] | 1704 | bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 1705 | { |
| 1706 | struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 1707 | |
| 1708 | return kvm_is_visible_memslot(memslot); |
| 1709 | } |
| 1710 | EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); |
| 1711 | |
Sean Christopherson | f9b84e1 | 2020-01-08 12:24:37 -0800 | [diff] [blame] | 1712 | unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) |
Joerg Roedel | 8f0b1ab | 2010-01-28 12:37:56 +0100 | [diff] [blame] | 1713 | { |
| 1714 | struct vm_area_struct *vma; |
| 1715 | unsigned long addr, size; |
| 1716 | |
| 1717 | size = PAGE_SIZE; |
| 1718 | |
Sean Christopherson | 42cde48 | 2020-01-08 12:24:38 -0800 | [diff] [blame] | 1719 | addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); |
Joerg Roedel | 8f0b1ab | 2010-01-28 12:37:56 +0100 | [diff] [blame] | 1720 | if (kvm_is_error_hva(addr)) |
| 1721 | return PAGE_SIZE; |
| 1722 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1723 | mmap_read_lock(current->mm); |
Joerg Roedel | 8f0b1ab | 2010-01-28 12:37:56 +0100 | [diff] [blame] | 1724 | vma = find_vma(current->mm, addr); |
| 1725 | if (!vma) |
| 1726 | goto out; |
| 1727 | |
| 1728 | size = vma_kernel_pagesize(vma); |
| 1729 | |
| 1730 | out: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1731 | mmap_read_unlock(current->mm); |
Joerg Roedel | 8f0b1ab | 2010-01-28 12:37:56 +0100 | [diff] [blame] | 1732 | |
| 1733 | return size; |
| 1734 | } |
| 1735 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 1736 | static bool memslot_is_readonly(struct kvm_memory_slot *slot) |
| 1737 | { |
| 1738 | return slot->flags & KVM_MEM_READONLY; |
| 1739 | } |
| 1740 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 1741 | static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, |
| 1742 | gfn_t *nr_pages, bool write) |
Izik Eidus | 539cb66 | 2007-11-11 22:05:04 +0200 | [diff] [blame] | 1743 | { |
Marcelo Tosatti | bc6678a | 2009-12-23 14:35:21 -0200 | [diff] [blame] | 1744 | if (!slot || slot->flags & KVM_MEMSLOT_INVALID) |
Xiao Guangrong | ca3a490 | 2012-08-21 11:01:50 +0800 | [diff] [blame] | 1745 | return KVM_HVA_ERR_BAD; |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 1746 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 1747 | if (memslot_is_readonly(slot) && write) |
| 1748 | return KVM_HVA_ERR_RO_BAD; |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 1749 | |
| 1750 | if (nr_pages) |
| 1751 | *nr_pages = slot->npages - (gfn - slot->base_gfn); |
| 1752 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 1753 | return __gfn_to_hva_memslot(slot, gfn); |
Izik Eidus | 539cb66 | 2007-11-11 22:05:04 +0200 | [diff] [blame] | 1754 | } |
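/*
 * For reference, the translation done by __gfn_to_hva_memslot() (defined in
 * kvm_host.h) is plain offset arithmetic within the slot, roughly:
 *
 *	hva = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
 */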
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 1755 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 1756 | static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, |
| 1757 | gfn_t *nr_pages) |
| 1758 | { |
| 1759 | return __gfn_to_hva_many(slot, gfn, nr_pages, true); |
| 1760 | } |
| 1761 | |
| 1762 | unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, |
Stephen Hemminger | 7940876 | 2013-12-29 12:12:29 -0800 | [diff] [blame] | 1763 | gfn_t gfn) |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 1764 | { |
| 1765 | return gfn_to_hva_many(slot, gfn, NULL); |
| 1766 | } |
| 1767 | EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); |
| 1768 | |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 1769 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) |
| 1770 | { |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 1771 | return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 1772 | } |
Sheng Yang | 0d15029 | 2008-04-25 21:44:50 +0800 | [diff] [blame] | 1773 | EXPORT_SYMBOL_GPL(gfn_to_hva); |
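
A hedged sketch of a one-off read through the returned hva (hypothetical helper; assumes the caller holds kvm->srcu and that @gpa is naturally aligned so the value does not cross a page). Real callers normally use kvm_read_guest(), defined later in this file, which performs the same checks:

static int demo_read_guest_u32(struct kvm *kvm, gpa_t gpa, u32 *val)
{
	unsigned long hva = gfn_to_hva(kvm, gpa >> PAGE_SHIFT);

	if (kvm_is_error_hva(hva))
		return -EFAULT;
	/* gfn_to_hva() drops the page offset; add it back before copying. */
	if (copy_from_user(val, (void __user *)(hva + offset_in_page(gpa)),
			   sizeof(*val)))
		return -EFAULT;
	return 0;
}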
Izik Eidus | 539cb66 | 2007-11-11 22:05:04 +0200 | [diff] [blame] | 1774 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 1775 | unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 1776 | { |
| 1777 | return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); |
| 1778 | } |
| 1779 | EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); |
| 1780 | |
Xiao Guangrong | 86ab8cf | 2012-08-21 10:59:53 +0800 | [diff] [blame] | 1781 | /* |
Wei Yang | 970c0d4 | 2018-10-09 10:41:15 +0800 | [diff] [blame] | 1782 | * Return the hva of a @gfn and the R/W attribute if possible. |
| 1783 | * |
| 1784 | * @slot: the kvm_memory_slot which contains @gfn |
| 1785 | * @gfn: the gfn to be translated |
| 1786 | * @writable: used to return the read/write attribute of the @slot if the hva |
| 1787 | * is valid and @writable is not NULL |
Xiao Guangrong | 86ab8cf | 2012-08-21 10:59:53 +0800 | [diff] [blame] | 1788 | */ |
Christoffer Dall | 64d8312 | 2014-08-19 12:15:00 +0200 | [diff] [blame] | 1789 | unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, |
| 1790 | gfn_t gfn, bool *writable) |
Gleb Natapov | 8030089 | 2010-10-19 18:13:41 +0200 | [diff] [blame] | 1791 | { |
Gleb Natapov | a2ac07f | 2013-10-01 19:58:36 +0300 | [diff] [blame] | 1792 | unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); |
| 1793 | |
| 1794 | if (!kvm_is_error_hva(hva) && writable) |
Paolo Bonzini | ba6a354 | 2013-09-09 13:52:33 +0200 | [diff] [blame] | 1795 | *writable = !memslot_is_readonly(slot); |
| 1796 | |
Gleb Natapov | a2ac07f | 2013-10-01 19:58:36 +0300 | [diff] [blame] | 1797 | return hva; |
Xiao Guangrong | 86ab8cf | 2012-08-21 10:59:53 +0800 | [diff] [blame] | 1798 | } |
| 1799 | |
Christoffer Dall | 64d8312 | 2014-08-19 12:15:00 +0200 | [diff] [blame] | 1800 | unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) |
| 1801 | { |
| 1802 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); |
| 1803 | |
| 1804 | return gfn_to_hva_memslot_prot(slot, gfn, writable); |
| 1805 | } |
| 1806 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 1807 | unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) |
| 1808 | { |
| 1809 | struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 1810 | |
| 1811 | return gfn_to_hva_memslot_prot(slot, gfn, writable); |
| 1812 | } |
| 1813 | |
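A short sketch of the typical use of the _prot variants (hypothetical helper; assumes kvm->srcu is held): check that a gfn is mapped and writable before emulating a guest store:

static bool demo_vcpu_gfn_writable(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	bool writable = false;
	unsigned long hva = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, &writable);

	return !kvm_is_error_hva(hva) && writable;
}
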
Huang Ying | fafc3db | 2011-01-30 11:15:49 +0800 | [diff] [blame] | 1814 | static inline int check_user_page_hwpoison(unsigned long addr) |
| 1815 | { |
Lorenzo Stoakes | 0d73175 | 2016-10-24 10:57:25 +0100 | [diff] [blame] | 1816 | int rc, flags = FOLL_HWPOISON | FOLL_WRITE; |
Huang Ying | fafc3db | 2011-01-30 11:15:49 +0800 | [diff] [blame] | 1817 | |
Lorenzo Stoakes | 0d73175 | 2016-10-24 10:57:25 +0100 | [diff] [blame] | 1818 | rc = get_user_pages(addr, 1, flags, NULL, NULL); |
Huang Ying | fafc3db | 2011-01-30 11:15:49 +0800 | [diff] [blame] | 1819 | return rc == -EHWPOISON; |
| 1820 | } |
| 1821 | |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1822 | /* |
Paolo Bonzini | b9b33da | 2018-07-27 17:44:41 +0200 | [diff] [blame] | 1823 |  * The fast path to get the writable pfn: the pfn is stored in @pfn,
| 1824 |  * and true indicates success, otherwise false is returned. This is
Miaohe Lin | 311497e | 2019-12-11 14:26:25 +0800 | [diff] [blame] | 1825 |  * also the only part that may run in atomic context.
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1826 | */ |
Paolo Bonzini | b9b33da | 2018-07-27 17:44:41 +0200 | [diff] [blame] | 1827 | static bool hva_to_pfn_fast(unsigned long addr, bool write_fault, |
| 1828 | bool *writable, kvm_pfn_t *pfn) |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1829 | { |
| 1830 | struct page *page[1]; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1831 | |
Xiao Guangrong | 12ce13f | 2012-08-21 11:00:49 +0800 | [diff] [blame] | 1832 | /* |
| 1833 |  * Fast-pin a writable pfn only if this is a write fault request,
| 1834 |  * or if the caller allows mapping a writable pfn for a read fault
| 1835 |  * request.
| 1836 | */ |
| 1837 | if (!(write_fault || writable)) |
| 1838 | return false; |
| 1839 | |
Souptick Joarder | dadbb61 | 2020-06-07 21:40:55 -0700 | [diff] [blame] | 1840 | if (get_user_page_fast_only(addr, FOLL_WRITE, page)) { |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1841 | *pfn = page_to_pfn(page[0]); |
| 1842 | |
| 1843 | if (writable) |
| 1844 | *writable = true; |
| 1845 | return true; |
| 1846 | } |
| 1847 | |
| 1848 | return false; |
| 1849 | } |
| 1850 | |
| 1851 | /* |
| 1852 |  * The slow path to get the pfn of the specified host virtual address;
| 1853 |  * 1 indicates success, -errno is returned if an error is detected.
| 1854 | */ |
| 1855 | static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 1856 | bool *writable, kvm_pfn_t *pfn) |
Avi Kivity | 954bbbc | 2007-03-30 14:02:32 +0300 | [diff] [blame] | 1857 | { |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 1858 | unsigned int flags = FOLL_HWPOISON; |
| 1859 | struct page *page; |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 1860 | int npages = 0; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1861 | |
| 1862 | might_sleep(); |
| 1863 | |
| 1864 | if (writable) |
| 1865 | *writable = write_fault; |
| 1866 | |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 1867 | if (write_fault) |
| 1868 | flags |= FOLL_WRITE; |
| 1869 | if (async) |
| 1870 | flags |= FOLL_NOWAIT; |
Lorenzo Stoakes | d4944b0 | 2016-10-13 01:20:12 +0100 | [diff] [blame] | 1871 | |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 1872 | npages = get_user_pages_unlocked(addr, 1, &page, flags); |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1873 | if (npages != 1) |
| 1874 | return npages; |
| 1875 | |
| 1876 | /* map read fault as writable if possible */ |
Xiao Guangrong | 12ce13f | 2012-08-21 11:00:49 +0800 | [diff] [blame] | 1877 | if (unlikely(!write_fault) && writable) { |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 1878 | struct page *wpage; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1879 | |
Souptick Joarder | dadbb61 | 2020-06-07 21:40:55 -0700 | [diff] [blame] | 1880 | if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) { |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1881 | *writable = true; |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 1882 | put_page(page); |
| 1883 | page = wpage; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1884 | } |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1885 | } |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 1886 | *pfn = page_to_pfn(page); |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1887 | return npages; |
| 1888 | } |
| 1889 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 1890 | static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) |
| 1891 | { |
| 1892 | if (unlikely(!(vma->vm_flags & VM_READ))) |
| 1893 | return false; |
| 1894 | |
| 1895 | if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) |
| 1896 | return false; |
| 1897 | |
| 1898 | return true; |
| 1899 | } |
| 1900 | |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 1901 | static int hva_to_pfn_remapped(struct vm_area_struct *vma, |
| 1902 | unsigned long addr, bool *async, |
KarimAllah Ahmed | a340b3e | 2018-01-17 19:18:56 +0100 | [diff] [blame] | 1903 | bool write_fault, bool *writable, |
| 1904 | kvm_pfn_t *p_pfn) |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 1905 | { |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 1906 | unsigned long pfn; |
| 1907 | int r; |
| 1908 | |
| 1909 | r = follow_pfn(vma, addr, &pfn); |
| 1910 | if (r) { |
| 1911 | /* |
| 1912 | * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does |
| 1913 | * not call the fault handler, so do it here. |
| 1914 | */ |
| 1915 | bool unlocked = false; |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 1916 | r = fixup_user_fault(current->mm, addr, |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 1917 | (write_fault ? FAULT_FLAG_WRITE : 0), |
| 1918 | &unlocked); |
Paolo Bonzini | a8387d0 | 2020-05-29 05:42:55 -0400 | [diff] [blame] | 1919 | if (unlocked) |
| 1920 | return -EAGAIN; |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 1921 | if (r) |
| 1922 | return r; |
| 1923 | |
| 1924 | r = follow_pfn(vma, addr, &pfn); |
| 1925 | if (r) |
| 1926 | return r; |
| 1927 | |
| 1928 | } |
| 1929 | |
KarimAllah Ahmed | a340b3e | 2018-01-17 19:18:56 +0100 | [diff] [blame] | 1930 | if (writable) |
| 1931 | *writable = true; |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 1932 | |
| 1933 | /* |
| 1934 | * Get a reference here because callers of *hva_to_pfn* and |
| 1935 | * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the |
| 1936 | * returned pfn. This is only needed if the VMA has VM_MIXEDMAP |
| 1937 | * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will |
| 1938 | * simply do nothing for reserved pfns. |
| 1939 | * |
| 1940 | * Whoever called remap_pfn_range is also going to call e.g. |
| 1941 | * unmap_mapping_range before the underlying pages are freed, |
| 1942 | * causing a call to our MMU notifier. |
| 1943 | */ |
| 1944 | kvm_get_pfn(pfn); |
| 1945 | |
| 1946 | *p_pfn = pfn; |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 1947 | return 0; |
| 1948 | } |
| 1949 | |
Xiao Guangrong | 12ce13f | 2012-08-21 11:00:49 +0800 | [diff] [blame] | 1950 | /* |
| 1951 | * Pin guest page in memory and return its pfn. |
| 1952 | * @addr: host virtual address which maps memory to the guest |
| 1953 |  * @atomic: whether the call is made from atomic context (and thus must not sleep)
| 1954 |  * @async: whether this function needs to wait for IO to complete if the
| 1955 |  *         host page is not in memory
| 1956 |  * @write_fault: whether we should get a writable host page
| 1957 |  * @writable: whether mapping a writable host page for !@write_fault is allowed
| 1958 | * |
| 1959 | * The function will map a writable host page for these two cases: |
| 1960 | * 1): @write_fault = true |
| 1961 |  * 2): @write_fault = false && @writable != NULL; here *@writable tells
| 1962 |  *     the caller whether the mapping actually ended up writable.
| 1963 | */ |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 1964 | static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1965 | bool write_fault, bool *writable) |
| 1966 | { |
| 1967 | struct vm_area_struct *vma; |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 1968 | kvm_pfn_t pfn = 0; |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 1969 | int npages, r; |
Avi Kivity | 954bbbc | 2007-03-30 14:02:32 +0300 | [diff] [blame] | 1970 | |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 1971 | /* we can do it either atomically or asynchronously, not both */ |
| 1972 | BUG_ON(atomic && async); |
| 1973 | |
Paolo Bonzini | b9b33da | 2018-07-27 17:44:41 +0200 | [diff] [blame] | 1974 | if (hva_to_pfn_fast(addr, write_fault, writable, &pfn)) |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1975 | return pfn; |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 1976 | |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1977 | if (atomic) |
| 1978 | return KVM_PFN_ERR_FAULT; |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 1979 | |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1980 | npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); |
| 1981 | if (npages == 1) |
| 1982 | return pfn; |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 1983 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1984 | mmap_read_lock(current->mm); |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1985 | if (npages == -EHWPOISON || |
| 1986 | (!async && check_user_page_hwpoison(addr))) { |
| 1987 | pfn = KVM_PFN_ERR_HWPOISON; |
| 1988 | goto exit; |
Xiao Guangrong | 887c08a | 2010-08-22 19:10:28 +0800 | [diff] [blame] | 1989 | } |
Izik Eidus | 539cb66 | 2007-11-11 22:05:04 +0200 | [diff] [blame] | 1990 | |
Paolo Bonzini | a8387d0 | 2020-05-29 05:42:55 -0400 | [diff] [blame] | 1991 | retry: |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1992 | vma = find_vma_intersection(current->mm, addr, addr + 1); |
Anthony Liguori | 8d4e128 | 2007-10-18 09:59:34 -0500 | [diff] [blame] | 1993 | |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 1994 | if (vma == NULL) |
| 1995 | pfn = KVM_PFN_ERR_FAULT; |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 1996 | else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { |
KarimAllah Ahmed | a340b3e | 2018-01-17 19:18:56 +0100 | [diff] [blame] | 1997 | r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn); |
Paolo Bonzini | a8387d0 | 2020-05-29 05:42:55 -0400 | [diff] [blame] | 1998 | if (r == -EAGAIN) |
| 1999 | goto retry; |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 2000 | if (r < 0) |
| 2001 | pfn = KVM_PFN_ERR_FAULT; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2002 | } else { |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2003 | if (async && vma_is_valid(vma, write_fault)) |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2004 | *async = true; |
| 2005 | pfn = KVM_PFN_ERR_FAULT; |
| 2006 | } |
| 2007 | exit: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2008 | mmap_read_unlock(current->mm); |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2009 | return pfn; |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2010 | } |
| 2011 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2012 | kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2013 | bool atomic, bool *async, bool write_fault, |
| 2014 | bool *writable) |
Xiao Guangrong | 887c08a | 2010-08-22 19:10:28 +0800 | [diff] [blame] | 2015 | { |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2016 | unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); |
| 2017 | |
Paolo Bonzini | b2740d3 | 2016-02-23 15:36:01 +0100 | [diff] [blame] | 2018 | if (addr == KVM_HVA_ERR_RO_BAD) { |
| 2019 | if (writable) |
| 2020 | *writable = false; |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2021 | return KVM_PFN_ERR_RO_FAULT; |
Paolo Bonzini | b2740d3 | 2016-02-23 15:36:01 +0100 | [diff] [blame] | 2022 | } |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2023 | |
Paolo Bonzini | b2740d3 | 2016-02-23 15:36:01 +0100 | [diff] [blame] | 2024 | if (kvm_is_error_hva(addr)) { |
| 2025 | if (writable) |
| 2026 | *writable = false; |
Xiao Guangrong | 81c52c5 | 2012-10-16 20:10:59 +0800 | [diff] [blame] | 2027 | return KVM_PFN_NOSLOT; |
Paolo Bonzini | b2740d3 | 2016-02-23 15:36:01 +0100 | [diff] [blame] | 2028 | } |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2029 | |
| 2030 | 	/* Do not map a writable pfn in the read-only memslot. */
| 2031 | if (writable && memslot_is_readonly(slot)) { |
| 2032 | *writable = false; |
| 2033 | writable = NULL; |
| 2034 | } |
| 2035 | |
| 2036 | return hva_to_pfn(addr, atomic, async, write_fault, |
| 2037 | writable); |
Xiao Guangrong | 887c08a | 2010-08-22 19:10:28 +0800 | [diff] [blame] | 2038 | } |
Paolo Bonzini | 3520469 | 2015-04-02 11:20:48 +0200 | [diff] [blame] | 2039 | EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); |
Xiao Guangrong | 887c08a | 2010-08-22 19:10:28 +0800 | [diff] [blame] | 2040 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2041 | kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2042 | bool *writable) |
| 2043 | { |
Paolo Bonzini | e37afc6 | 2015-05-19 16:09:04 +0200 | [diff] [blame] | 2044 | return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, |
| 2045 | write_fault, writable); |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2046 | } |
| 2047 | EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); |
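
A sketch of the pin/use/release discipline these helpers expect (hypothetical caller): every pfn successfully returned by a gfn_to_pfn variant carries a reference that must be dropped through one of the kvm_release_pfn_* helpers defined below:

static int demo_touch_gfn(struct kvm *kvm, gfn_t gfn)
{
	bool writable = false;
	kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, true, &writable);

	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	/* ... access the page, e.g. via pfn_to_page(pfn) ... */

	kvm_release_pfn_dirty(pfn);	/* drops the reference taken above */
	return 0;
}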
| 2048 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2049 | kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) |
Marcelo Tosatti | 506f0d6 | 2009-12-23 14:35:19 -0200 | [diff] [blame] | 2050 | { |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2051 | return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL); |
Marcelo Tosatti | 506f0d6 | 2009-12-23 14:35:19 -0200 | [diff] [blame] | 2052 | } |
Paolo Bonzini | e37afc6 | 2015-05-19 16:09:04 +0200 | [diff] [blame] | 2053 | EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); |
Marcelo Tosatti | 506f0d6 | 2009-12-23 14:35:19 -0200 | [diff] [blame] | 2054 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2055 | kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) |
Xiao Guangrong | 037d92d | 2012-08-21 10:59:12 +0800 | [diff] [blame] | 2056 | { |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2057 | return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL); |
Xiao Guangrong | 037d92d | 2012-08-21 10:59:12 +0800 | [diff] [blame] | 2058 | } |
| 2059 | EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); |
| 2060 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2061 | kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2062 | { |
| 2063 | return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); |
| 2064 | } |
| 2065 | EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); |
| 2066 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2067 | kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) |
Paolo Bonzini | e37afc6 | 2015-05-19 16:09:04 +0200 | [diff] [blame] | 2068 | { |
| 2069 | return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); |
| 2070 | } |
| 2071 | EXPORT_SYMBOL_GPL(gfn_to_pfn); |
| 2072 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2073 | kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2074 | { |
| 2075 | return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); |
| 2076 | } |
| 2077 | EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); |
| 2078 | |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 2079 | int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2080 | struct page **pages, int nr_pages) |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2081 | { |
| 2082 | unsigned long addr; |
Arnd Bergmann | 076b925 | 2017-08-10 14:14:39 +0200 | [diff] [blame] | 2083 | gfn_t entry = 0; |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2084 | |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 2085 | addr = gfn_to_hva_many(slot, gfn, &entry); |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2086 | if (kvm_is_error_hva(addr)) |
| 2087 | return -1; |
| 2088 | |
| 2089 | if (entry < nr_pages) |
| 2090 | return 0; |
| 2091 | |
Souptick Joarder | dadbb61 | 2020-06-07 21:40:55 -0700 | [diff] [blame] | 2092 | return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2093 | } |
| 2094 | EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); |
| 2095 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2096 | static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2097 | { |
Xiao Guangrong | 81c52c5 | 2012-10-16 20:10:59 +0800 | [diff] [blame] | 2098 | if (is_error_noslot_pfn(pfn)) |
Xiao Guangrong | 6cede2e | 2012-08-03 15:41:22 +0800 | [diff] [blame] | 2099 | return KVM_ERR_PTR_BAD_PAGE; |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2100 | |
Ard Biesheuvel | bf4bea8 | 2014-11-10 08:33:56 +0000 | [diff] [blame] | 2101 | if (kvm_is_reserved_pfn(pfn)) { |
Xiao Guangrong | cb9aaa3 | 2012-08-03 15:42:10 +0800 | [diff] [blame] | 2102 | WARN_ON(1); |
| 2103 | return KVM_ERR_PTR_BAD_PAGE; |
| 2104 | } |
| 2105 | |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2106 | return pfn_to_page(pfn); |
| 2107 | } |
| 2108 | |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2109 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) |
| 2110 | { |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2111 | kvm_pfn_t pfn; |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2112 | |
| 2113 | pfn = gfn_to_pfn(kvm, gfn); |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2114 | |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2115 | return kvm_pfn_to_page(pfn); |
Avi Kivity | 954bbbc | 2007-03-30 14:02:32 +0300 | [diff] [blame] | 2116 | } |
| 2117 | EXPORT_SYMBOL_GPL(gfn_to_page); |
| 2118 | |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2119 | void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache) |
| 2120 | { |
| 2121 | if (pfn == 0) |
| 2122 | return; |
| 2123 | |
| 2124 | if (cache) |
| 2125 | cache->pfn = cache->gfn = 0; |
| 2126 | |
| 2127 | if (dirty) |
| 2128 | kvm_release_pfn_dirty(pfn); |
| 2129 | else |
| 2130 | kvm_release_pfn_clean(pfn); |
| 2131 | } |
| 2132 | |
| 2133 | static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2134 | struct gfn_to_pfn_cache *cache, u64 gen) |
| 2135 | { |
| 2136 | kvm_release_pfn(cache->pfn, cache->dirty, cache); |
| 2137 | |
| 2138 | cache->pfn = gfn_to_pfn_memslot(slot, gfn); |
| 2139 | cache->gfn = gfn; |
| 2140 | cache->dirty = false; |
| 2141 | cache->generation = gen; |
| 2142 | } |
| 2143 | |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2144 | static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn, |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2145 | struct kvm_host_map *map, |
| 2146 | struct gfn_to_pfn_cache *cache, |
| 2147 | bool atomic) |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2148 | { |
| 2149 | kvm_pfn_t pfn; |
| 2150 | void *hva = NULL; |
| 2151 | struct page *page = KVM_UNMAPPED_PAGE; |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2152 | struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn); |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2153 | u64 gen = slots->generation; |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2154 | |
| 2155 | if (!map) |
| 2156 | return -EINVAL; |
| 2157 | |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2158 | if (cache) { |
| 2159 | if (!cache->pfn || cache->gfn != gfn || |
| 2160 | cache->generation != gen) { |
| 2161 | if (atomic) |
| 2162 | return -EAGAIN; |
| 2163 | kvm_cache_gfn_to_pfn(slot, gfn, cache, gen); |
| 2164 | } |
| 2165 | pfn = cache->pfn; |
| 2166 | } else { |
| 2167 | if (atomic) |
| 2168 | return -EAGAIN; |
| 2169 | pfn = gfn_to_pfn_memslot(slot, gfn); |
| 2170 | } |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2171 | if (is_error_noslot_pfn(pfn)) |
| 2172 | return -EINVAL; |
| 2173 | |
| 2174 | if (pfn_valid(pfn)) { |
| 2175 | page = pfn_to_page(pfn); |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2176 | if (atomic) |
| 2177 | hva = kmap_atomic(page); |
| 2178 | else |
| 2179 | hva = kmap(page); |
Paolo Bonzini | d30b214 | 2019-05-20 12:06:36 +0200 | [diff] [blame] | 2180 | #ifdef CONFIG_HAS_IOMEM |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2181 | } else if (!atomic) { |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2182 | hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2183 | } else { |
| 2184 | return -EINVAL; |
Paolo Bonzini | d30b214 | 2019-05-20 12:06:36 +0200 | [diff] [blame] | 2185 | #endif |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2186 | } |
| 2187 | |
| 2188 | if (!hva) |
| 2189 | return -EFAULT; |
| 2190 | |
| 2191 | map->page = page; |
| 2192 | map->hva = hva; |
| 2193 | map->pfn = pfn; |
| 2194 | map->gfn = gfn; |
| 2195 | |
| 2196 | return 0; |
| 2197 | } |
| 2198 | |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2199 | int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, |
| 2200 | struct gfn_to_pfn_cache *cache, bool atomic) |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2201 | { |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2202 | return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map, |
| 2203 | cache, atomic); |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2204 | } |
| 2205 | EXPORT_SYMBOL_GPL(kvm_map_gfn); |
| 2206 | |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2207 | int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) |
| 2208 | { |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2209 | return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map, |
| 2210 | NULL, false); |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2211 | } |
| 2212 | EXPORT_SYMBOL_GPL(kvm_vcpu_map); |
| 2213 | |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2214 | static void __kvm_unmap_gfn(struct kvm *kvm, |
| 2215 | struct kvm_memory_slot *memslot, |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2216 | struct kvm_host_map *map, |
| 2217 | struct gfn_to_pfn_cache *cache, |
| 2218 | bool dirty, bool atomic) |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2219 | { |
| 2220 | if (!map) |
| 2221 | return; |
| 2222 | |
| 2223 | if (!map->hva) |
| 2224 | return; |
| 2225 | |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2226 | if (map->page != KVM_UNMAPPED_PAGE) { |
| 2227 | if (atomic) |
| 2228 | kunmap_atomic(map->hva); |
| 2229 | else |
| 2230 | kunmap(map->page); |
| 2231 | } |
Christian Borntraeger | eb1f2f3 | 2019-05-27 10:28:25 +0200 | [diff] [blame] | 2232 | #ifdef CONFIG_HAS_IOMEM |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2233 | else if (!atomic) |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2234 | memunmap(map->hva); |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2235 | else |
| 2236 | WARN_ONCE(1, "Unexpected unmapping in atomic context"); |
Christian Borntraeger | eb1f2f3 | 2019-05-27 10:28:25 +0200 | [diff] [blame] | 2237 | #endif |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2238 | |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2239 | if (dirty) |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2240 | mark_page_dirty_in_slot(kvm, memslot, map->gfn); |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2241 | |
| 2242 | if (cache) |
| 2243 | cache->dirty |= dirty; |
| 2244 | else |
| 2245 | kvm_release_pfn(map->pfn, dirty, NULL); |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2246 | |
| 2247 | map->hva = NULL; |
| 2248 | map->page = NULL; |
| 2249 | } |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2250 | |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2251 | int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, |
| 2252 | struct gfn_to_pfn_cache *cache, bool dirty, bool atomic) |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2253 | { |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2254 | __kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map, |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2255 | cache, dirty, atomic); |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2256 | return 0; |
| 2257 | } |
| 2258 | EXPORT_SYMBOL_GPL(kvm_unmap_gfn); |
| 2259 | |
| 2260 | void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) |
| 2261 | { |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2262 | __kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), |
| 2263 | map, NULL, dirty, false); |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2264 | } |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2265 | EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); |
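
The intended map/modify/unmap pattern, as a hedged sketch (hypothetical helper; kvm_vcpu_map() may sleep, so this must not run in atomic context):

static int demo_write_guest_byte(struct kvm_vcpu *vcpu, gfn_t gfn,
				 unsigned int offset, u8 val)
{
	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gfn, &map))
		return -EFAULT;

	*((u8 *)map.hva + offset) = val;

	/* dirty == true so the store is reflected in dirty tracking */
	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}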
| 2266 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2267 | struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 2268 | { |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2269 | kvm_pfn_t pfn; |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2270 | |
| 2271 | pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); |
| 2272 | |
| 2273 | return kvm_pfn_to_page(pfn); |
| 2274 | } |
| 2275 | EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); |
| 2276 | |
Izik Eidus | b4231d6 | 2007-11-20 11:49:33 +0200 | [diff] [blame] | 2277 | void kvm_release_page_clean(struct page *page) |
| 2278 | { |
Xiao Guangrong | 32cad84 | 2012-08-03 15:42:52 +0800 | [diff] [blame] | 2279 | WARN_ON(is_error_page(page)); |
| 2280 | |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2281 | kvm_release_pfn_clean(page_to_pfn(page)); |
Izik Eidus | b4231d6 | 2007-11-20 11:49:33 +0200 | [diff] [blame] | 2282 | } |
| 2283 | EXPORT_SYMBOL_GPL(kvm_release_page_clean); |
| 2284 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2285 | void kvm_release_pfn_clean(kvm_pfn_t pfn) |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2286 | { |
Ard Biesheuvel | bf4bea8 | 2014-11-10 08:33:56 +0000 | [diff] [blame] | 2287 | if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2288 | put_page(pfn_to_page(pfn)); |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2289 | } |
| 2290 | EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); |
| 2291 | |
Izik Eidus | b4231d6 | 2007-11-20 11:49:33 +0200 | [diff] [blame] | 2292 | void kvm_release_page_dirty(struct page *page) |
Izik Eidus | 8a7ae05 | 2007-10-18 11:09:33 +0200 | [diff] [blame] | 2293 | { |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2294 | WARN_ON(is_error_page(page)); |
| 2295 | |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2296 | kvm_release_pfn_dirty(page_to_pfn(page)); |
Izik Eidus | 8a7ae05 | 2007-10-18 11:09:33 +0200 | [diff] [blame] | 2297 | } |
Izik Eidus | b4231d6 | 2007-11-20 11:49:33 +0200 | [diff] [blame] | 2298 | EXPORT_SYMBOL_GPL(kvm_release_page_dirty); |
Izik Eidus | 8a7ae05 | 2007-10-18 11:09:33 +0200 | [diff] [blame] | 2299 | |
David Hildenbrand | f7a6509 | 2017-09-01 17:11:43 +0200 | [diff] [blame] | 2300 | void kvm_release_pfn_dirty(kvm_pfn_t pfn) |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2301 | { |
| 2302 | kvm_set_pfn_dirty(pfn); |
| 2303 | kvm_release_pfn_clean(pfn); |
| 2304 | } |
David Hildenbrand | f7a6509 | 2017-09-01 17:11:43 +0200 | [diff] [blame] | 2305 | EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2306 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2307 | void kvm_set_pfn_dirty(kvm_pfn_t pfn) |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2308 | { |
Miaohe Lin | d29c03a | 2019-12-05 11:05:05 +0800 | [diff] [blame] | 2309 | if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) |
| 2310 | SetPageDirty(pfn_to_page(pfn)); |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2311 | } |
| 2312 | EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); |
| 2313 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2314 | void kvm_set_pfn_accessed(kvm_pfn_t pfn) |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2315 | { |
Sean Christopherson | a78986a | 2019-11-11 14:12:27 -0800 | [diff] [blame] | 2316 | if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2317 | mark_page_accessed(pfn_to_page(pfn)); |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2318 | } |
| 2319 | EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); |
| 2320 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2321 | void kvm_get_pfn(kvm_pfn_t pfn) |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2322 | { |
Ard Biesheuvel | bf4bea8 | 2014-11-10 08:33:56 +0000 | [diff] [blame] | 2323 | if (!kvm_is_reserved_pfn(pfn)) |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2324 | get_page(pfn_to_page(pfn)); |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2325 | } |
| 2326 | EXPORT_SYMBOL_GPL(kvm_get_pfn); |
| 2327 | |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2328 | static int next_segment(unsigned long len, int offset) |
| 2329 | { |
| 2330 | if (len > PAGE_SIZE - offset) |
| 2331 | return PAGE_SIZE - offset; |
| 2332 | else |
| 2333 | return len; |
| 2334 | } |
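
Worked example of the segmentation arithmetic (assuming 4KiB pages): for offset_in_page(gpa) = 0xf00 and len = 0x300, the first call returns PAGE_SIZE - 0xf00 = 0x100, and after the caller resets offset to 0 the second call returns the remaining 0x200.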
| 2335 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2336 | static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2337 | void *data, int offset, int len) |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2338 | { |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2339 | int r; |
| 2340 | unsigned long addr; |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2341 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2342 | addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2343 | if (kvm_is_error_hva(addr)) |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2344 | return -EFAULT; |
Paolo Bonzini | 3180a7f | 2015-04-02 14:08:20 +0200 | [diff] [blame] | 2345 | r = __copy_from_user(data, (void __user *)addr + offset, len); |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2346 | if (r) |
| 2347 | return -EFAULT; |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2348 | return 0; |
| 2349 | } |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2350 | |
| 2351 | int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, |
| 2352 | int len) |
| 2353 | { |
| 2354 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); |
| 2355 | |
| 2356 | return __kvm_read_guest_page(slot, gfn, data, offset, len); |
| 2357 | } |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2358 | EXPORT_SYMBOL_GPL(kvm_read_guest_page); |
| 2359 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2360 | int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, |
| 2361 | int offset, int len) |
| 2362 | { |
| 2363 | struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 2364 | |
| 2365 | return __kvm_read_guest_page(slot, gfn, data, offset, len); |
| 2366 | } |
| 2367 | EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); |
| 2368 | |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2369 | int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) |
| 2370 | { |
| 2371 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2372 | int seg; |
| 2373 | int offset = offset_in_page(gpa); |
| 2374 | int ret; |
| 2375 | |
| 2376 | while ((seg = next_segment(len, offset)) != 0) { |
| 2377 | ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); |
| 2378 | if (ret < 0) |
| 2379 | return ret; |
| 2380 | offset = 0; |
| 2381 | len -= seg; |
| 2382 | data += seg; |
| 2383 | ++gfn; |
| 2384 | } |
| 2385 | return 0; |
| 2386 | } |
| 2387 | EXPORT_SYMBOL_GPL(kvm_read_guest); |
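
A sketch of reading a multi-byte guest structure that may straddle a page boundary (struct and helper names are illustrative); the page-by-page loop above makes the crossing transparent to the caller:

struct demo_guest_info {
	u64 features;
	u64 flags;
};

static int demo_read_guest_info(struct kvm *kvm, gpa_t gpa,
				struct demo_guest_info *info)
{
	return kvm_read_guest(kvm, gpa, info, sizeof(*info));
}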
| 2388 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2389 | int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) |
| 2390 | { |
| 2391 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2392 | int seg; |
| 2393 | int offset = offset_in_page(gpa); |
| 2394 | int ret; |
| 2395 | |
| 2396 | while ((seg = next_segment(len, offset)) != 0) { |
| 2397 | ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); |
| 2398 | if (ret < 0) |
| 2399 | return ret; |
| 2400 | offset = 0; |
| 2401 | len -= seg; |
| 2402 | data += seg; |
| 2403 | ++gfn; |
| 2404 | } |
| 2405 | return 0; |
| 2406 | } |
| 2407 | EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); |
| 2408 | |
| 2409 | static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2410 | void *data, int offset, unsigned long len) |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2411 | { |
| 2412 | int r; |
| 2413 | unsigned long addr; |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2414 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2415 | addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2416 | if (kvm_is_error_hva(addr)) |
| 2417 | return -EFAULT; |
Andrea Arcangeli | 0aac03f | 2008-01-30 19:57:35 +0100 | [diff] [blame] | 2418 | pagefault_disable(); |
Paolo Bonzini | 3180a7f | 2015-04-02 14:08:20 +0200 | [diff] [blame] | 2419 | r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); |
Andrea Arcangeli | 0aac03f | 2008-01-30 19:57:35 +0100 | [diff] [blame] | 2420 | pagefault_enable(); |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2421 | if (r) |
| 2422 | return -EFAULT; |
| 2423 | return 0; |
| 2424 | } |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2425 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2426 | int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, |
| 2427 | void *data, unsigned long len) |
| 2428 | { |
| 2429 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2430 | struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 2431 | int offset = offset_in_page(gpa); |
| 2432 | |
| 2433 | return __kvm_read_guest_atomic(slot, gfn, data, offset, len); |
| 2434 | } |
| 2435 | EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); |
| 2436 | |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2437 | static int __kvm_write_guest_page(struct kvm *kvm, |
| 2438 | struct kvm_memory_slot *memslot, gfn_t gfn, |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2439 | const void *data, int offset, int len) |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2440 | { |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2441 | int r; |
| 2442 | unsigned long addr; |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2443 | |
Radim Krčmář | 251eb84 | 2015-04-10 21:47:27 +0200 | [diff] [blame] | 2444 | addr = gfn_to_hva_memslot(memslot, gfn); |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2445 | if (kvm_is_error_hva(addr)) |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2446 | return -EFAULT; |
Xiao Guangrong | 8b0cedf | 2011-05-15 23:22:04 +0800 | [diff] [blame] | 2447 | r = __copy_to_user((void __user *)addr + offset, data, len); |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2448 | if (r) |
| 2449 | return -EFAULT; |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2450 | mark_page_dirty_in_slot(kvm, memslot, gfn); |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2451 | return 0; |
| 2452 | } |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2453 | |
| 2454 | int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, |
| 2455 | const void *data, int offset, int len) |
| 2456 | { |
| 2457 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); |
| 2458 | |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2459 | return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2460 | } |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2461 | EXPORT_SYMBOL_GPL(kvm_write_guest_page); |
| 2462 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2463 | int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 2464 | const void *data, int offset, int len) |
| 2465 | { |
| 2466 | struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 2467 | |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2468 | return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2469 | } |
| 2470 | EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); |
| 2471 | |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2472 | int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, |
| 2473 | unsigned long len) |
| 2474 | { |
| 2475 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2476 | int seg; |
| 2477 | int offset = offset_in_page(gpa); |
| 2478 | int ret; |
| 2479 | |
| 2480 | while ((seg = next_segment(len, offset)) != 0) { |
| 2481 | ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); |
| 2482 | if (ret < 0) |
| 2483 | return ret; |
| 2484 | offset = 0; |
| 2485 | len -= seg; |
| 2486 | data += seg; |
| 2487 | ++gfn; |
| 2488 | } |
| 2489 | return 0; |
| 2490 | } |
Wincy Van | ff651cb | 2014-12-11 08:52:58 +0300 | [diff] [blame] | 2491 | EXPORT_SYMBOL_GPL(kvm_write_guest); |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2492 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2493 | int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, |
| 2494 | unsigned long len) |
| 2495 | { |
| 2496 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2497 | int seg; |
| 2498 | int offset = offset_in_page(gpa); |
| 2499 | int ret; |
| 2500 | |
| 2501 | while ((seg = next_segment(len, offset)) != 0) { |
| 2502 | ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); |
| 2503 | if (ret < 0) |
| 2504 | return ret; |
| 2505 | offset = 0; |
| 2506 | len -= seg; |
| 2507 | data += seg; |
| 2508 | ++gfn; |
| 2509 | } |
| 2510 | return 0; |
| 2511 | } |
| 2512 | EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); |
| 2513 | |
Paolo Bonzini | 5a2d436 | 2017-02-03 20:32:28 -0800 | [diff] [blame] | 2514 | static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, |
| 2515 | struct gfn_to_hva_cache *ghc, |
| 2516 | gpa_t gpa, unsigned long len) |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2517 | { |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2518 | int offset = offset_in_page(gpa); |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 2519 | gfn_t start_gfn = gpa >> PAGE_SHIFT; |
| 2520 | gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; |
| 2521 | gfn_t nr_pages_needed = end_gfn - start_gfn + 1; |
| 2522 | gfn_t nr_pages_avail; |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2523 | |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 2524 | /* Update ghc->generation before performing any error checks. */ |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2525 | ghc->generation = slots->generation; |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 2526 | |
| 2527 | if (start_gfn > end_gfn) { |
| 2528 | ghc->hva = KVM_HVA_ERR_BAD; |
| 2529 | return -EINVAL; |
| 2530 | } |
Jim Mattson | f1b9dd5 | 2018-12-17 13:53:33 -0800 | [diff] [blame] | 2531 | |
| 2532 | /* |
| 2533 | * If the requested region crosses two memslots, we still |
| 2534 | * verify that the entire region is valid here. |
| 2535 | */ |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 2536 | for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { |
Jim Mattson | f1b9dd5 | 2018-12-17 13:53:33 -0800 | [diff] [blame] | 2537 | ghc->memslot = __gfn_to_memslot(slots, start_gfn); |
| 2538 | ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, |
| 2539 | &nr_pages_avail); |
| 2540 | if (kvm_is_error_hva(ghc->hva)) |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 2541 | return -EFAULT; |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 2542 | } |
Jim Mattson | f1b9dd5 | 2018-12-17 13:53:33 -0800 | [diff] [blame] | 2543 | |
| 2544 | 	/* Use the slow path for cross-page reads and writes. */
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 2545 | if (nr_pages_needed == 1) |
Jim Mattson | f1b9dd5 | 2018-12-17 13:53:33 -0800 | [diff] [blame] | 2546 | ghc->hva += offset; |
| 2547 | else |
| 2548 | ghc->memslot = NULL; |
| 2549 | |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 2550 | ghc->gpa = gpa; |
| 2551 | ghc->len = len; |
| 2552 | return 0; |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2553 | } |
Paolo Bonzini | 5a2d436 | 2017-02-03 20:32:28 -0800 | [diff] [blame] | 2554 | |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2555 | int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
Paolo Bonzini | 5a2d436 | 2017-02-03 20:32:28 -0800 | [diff] [blame] | 2556 | gpa_t gpa, unsigned long len) |
| 2557 | { |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2558 | struct kvm_memslots *slots = kvm_memslots(kvm); |
Paolo Bonzini | 5a2d436 | 2017-02-03 20:32:28 -0800 | [diff] [blame] | 2559 | return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); |
| 2560 | } |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2561 | EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2562 | |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2563 | int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
Jim Mattson | 7a86dab | 2018-12-14 14:34:43 -0800 | [diff] [blame] | 2564 | void *data, unsigned int offset, |
| 2565 | unsigned long len) |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2566 | { |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2567 | struct kvm_memslots *slots = kvm_memslots(kvm); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2568 | int r; |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 2569 | gpa_t gpa = ghc->gpa + offset; |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2570 | |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 2571 | BUG_ON(len + offset > ghc->len); |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 2572 | |
Sean Christopherson | dc9ce71 | 2020-01-09 15:56:20 -0800 | [diff] [blame] | 2573 | if (slots->generation != ghc->generation) { |
| 2574 | if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) |
| 2575 | return -EFAULT; |
| 2576 | } |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 2577 | |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2578 | if (kvm_is_error_hva(ghc->hva)) |
| 2579 | return -EFAULT; |
| 2580 | |
Sean Christopherson | fcfbc61 | 2020-01-09 15:56:18 -0800 | [diff] [blame] | 2581 | if (unlikely(!ghc->memslot)) |
| 2582 | return kvm_write_guest(kvm, gpa, data, len); |
| 2583 | |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 2584 | r = __copy_to_user((void __user *)ghc->hva + offset, data, len); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2585 | if (r) |
| 2586 | return -EFAULT; |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2587 | mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2588 | |
| 2589 | return 0; |
| 2590 | } |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2591 | EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 2592 | |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2593 | int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| 2594 | void *data, unsigned long len) |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 2595 | { |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2596 | return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 2597 | } |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2598 | EXPORT_SYMBOL_GPL(kvm_write_guest_cached); |
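
A sketch of the intended two-phase cache pattern (hypothetical names): resolve the translation once at setup time, then issue cheap cached writes on the hot path. The generation check above transparently re-initializes the cache after memslot changes:

/* Setup path: resolve gpa -> hva once. */
static int demo_cache_setup(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			    gpa_t gpa)
{
	return kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(u64));
}

/* Hot path: no memslot lookup unless the slot generation has changed. */
static int demo_cache_publish(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      u64 val)
{
	return kvm_write_guest_cached(kvm, ghc, &val, sizeof(val));
}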
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2599 | |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 2600 | int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| 2601 | void *data, unsigned int offset, |
| 2602 | unsigned long len) |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 2603 | { |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2604 | struct kvm_memslots *slots = kvm_memslots(kvm); |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 2605 | int r; |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 2606 | gpa_t gpa = ghc->gpa + offset; |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 2607 | |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 2608 | BUG_ON(len + offset > ghc->len); |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 2609 | |
Sean Christopherson | dc9ce71 | 2020-01-09 15:56:20 -0800 | [diff] [blame] | 2610 | if (slots->generation != ghc->generation) { |
| 2611 | if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) |
| 2612 | return -EFAULT; |
| 2613 | } |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 2614 | |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 2615 | if (kvm_is_error_hva(ghc->hva)) |
| 2616 | return -EFAULT; |
| 2617 | |
Sean Christopherson | fcfbc61 | 2020-01-09 15:56:18 -0800 | [diff] [blame] | 2618 | if (unlikely(!ghc->memslot)) |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 2619 | return kvm_read_guest(kvm, gpa, data, len); |
Sean Christopherson | fcfbc61 | 2020-01-09 15:56:18 -0800 | [diff] [blame] | 2620 | |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 2621 | r = __copy_from_user(data, (void __user *)ghc->hva + offset, len); |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 2622 | if (r) |
| 2623 | return -EFAULT; |
| 2624 | |
| 2625 | return 0; |
| 2626 | } |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 2627 | EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached); |
| 2628 | |
| 2629 | int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| 2630 | void *data, unsigned long len) |
| 2631 | { |
| 2632 | return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); |
| 2633 | } |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2634 | EXPORT_SYMBOL_GPL(kvm_read_guest_cached); |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 2635 | |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2636 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) |
| 2637 | { |
Paolo Bonzini | 2f54144 | 2020-11-06 05:25:09 -0500 | [diff] [blame] | 2638 | const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2639 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2640 | int seg; |
| 2641 | int offset = offset_in_page(gpa); |
| 2642 | int ret; |
| 2643 | |
Kevin Mulvey | bfda0e8 | 2015-02-20 08:21:36 -0500 | [diff] [blame] | 2644 | while ((seg = next_segment(len, offset)) != 0) { |
Paolo Bonzini | 2f54144 | 2020-11-06 05:25:09 -0500 | [diff] [blame] | 2645 | 		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2646 | if (ret < 0) |
| 2647 | return ret; |
| 2648 | offset = 0; |
| 2649 | len -= seg; |
| 2650 | ++gfn; |
| 2651 | } |
| 2652 | return 0; |
| 2653 | } |
| 2654 | EXPORT_SYMBOL_GPL(kvm_clear_guest); |
| 2655 | |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2656 | void mark_page_dirty_in_slot(struct kvm *kvm, |
| 2657 | struct kvm_memory_slot *memslot, |
| 2658 | gfn_t gfn) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2659 | { |
Rusty Russell | 7e9d619 | 2007-07-31 20:41:14 +1000 | [diff] [blame] | 2660 | if (memslot && memslot->dirty_bitmap) { |
| 2661 | unsigned long rel_gfn = gfn - memslot->base_gfn; |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 2662 | u32 slot = (memslot->as_id << 16) | memslot->id; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2663 | |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 2664 | if (kvm->dirty_ring_size) |
| 2665 | kvm_dirty_ring_push(kvm_dirty_ring_get(kvm), |
| 2666 | slot, rel_gfn); |
| 2667 | else |
| 2668 | set_bit_le(rel_gfn, memslot->dirty_bitmap); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2669 | } |
| 2670 | } |
Ben Gardon | a6a0b05 | 2020-10-14 11:26:55 -0700 | [diff] [blame] | 2671 | EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2672 | |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2673 | void mark_page_dirty(struct kvm *kvm, gfn_t gfn) |
| 2674 | { |
| 2675 | struct kvm_memory_slot *memslot; |
| 2676 | |
| 2677 | memslot = gfn_to_memslot(kvm, gfn); |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2678 | mark_page_dirty_in_slot(kvm, memslot, gfn); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2679 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2680 | EXPORT_SYMBOL_GPL(mark_page_dirty); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2681 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2682 | void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 2683 | { |
| 2684 | struct kvm_memory_slot *memslot; |
| 2685 | |
| 2686 | memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2687 | mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2688 | } |
| 2689 | EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); |
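/*
 * Editor's sketch: code that writes guest memory through a raw kernel
 * mapping (bypassing kvm_write_guest()) must log the write itself, as
 * several arch paths do. The function and its arguments are hypothetical.
 */
static void example_update_shared_page(struct kvm_vcpu *vcpu, gpa_t gpa,
				       void *hva, u64 value)
{
	*(u64 *)hva = value;	/* direct write, no dirty logging done */
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}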
| 2690 | |
Jan H. Schönherr | 20b7035 | 2017-11-24 22:39:01 +0100 | [diff] [blame] | 2691 | void kvm_sigset_activate(struct kvm_vcpu *vcpu) |
| 2692 | { |
| 2693 | if (!vcpu->sigset_active) |
| 2694 | return; |
| 2695 | |
| 2696 | /* |
| 2697 | * This does a lockless modification of ->real_blocked, which is fine
| 2698 | * because only current can change ->real_blocked, and all readers of
| 2699 | * ->real_blocked don't care as long as ->real_blocked is always a
| 2700 | * subset of ->blocked.
| 2701 | */ |
| 2702 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
| 2703 | } |
| 2704 | |
| 2705 | void kvm_sigset_deactivate(struct kvm_vcpu *vcpu) |
| 2706 | { |
| 2707 | if (!vcpu->sigset_active) |
| 2708 | return; |
| 2709 | |
| 2710 | sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
| 2711 | sigemptyset(&current->real_blocked);
| 2712 | } |
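/*
 * Editor's sketch: the two helpers above are intended to bracket the arch
 * run loop, so that a signal left unblocked by vcpu->sigset can interrupt
 * guest entry. Simplified from what the arch callers actually do.
 */
static int example_arch_run(struct kvm_vcpu *vcpu)
{
	int r;

	kvm_sigset_activate(vcpu);	/* swap in the userspace-set mask */
	r = 0;				/* ... enter guest, handle exits ... */
	kvm_sigset_deactivate(vcpu);	/* restore ->real_blocked */
	return r;
}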
| 2713 | |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 2714 | static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) |
| 2715 | { |
Nir Weiner | dee339b | 2019-01-27 12:17:16 +0200 | [diff] [blame] | 2716 | unsigned int old, val, grow, grow_start; |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 2717 | |
Wanpeng Li | 2cbd782 | 2015-09-03 22:07:39 +0800 | [diff] [blame] | 2718 | old = val = vcpu->halt_poll_ns; |
Nir Weiner | dee339b | 2019-01-27 12:17:16 +0200 | [diff] [blame] | 2719 | grow_start = READ_ONCE(halt_poll_ns_grow_start); |
Christian Borntraeger | 6b6de68 | 2016-02-09 13:47:55 +0100 | [diff] [blame] | 2720 | grow = READ_ONCE(halt_poll_ns_grow); |
Nir Weiner | 7fa08e7 | 2019-01-27 12:17:14 +0200 | [diff] [blame] | 2721 | if (!grow) |
| 2722 | goto out; |
| 2723 | |
Nir Weiner | dee339b | 2019-01-27 12:17:16 +0200 | [diff] [blame] | 2724 | val *= grow; |
| 2725 | if (val < grow_start) |
| 2726 | val = grow_start; |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 2727 | |
David Matlack | 313f636 | 2016-03-08 16:19:44 -0800 | [diff] [blame] | 2728 | if (val > halt_poll_ns) |
| 2729 | val = halt_poll_ns; |
| 2730 | |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 2731 | vcpu->halt_poll_ns = val; |
Nir Weiner | 7fa08e7 | 2019-01-27 12:17:14 +0200 | [diff] [blame] | 2732 | out: |
Wanpeng Li | 2cbd782 | 2015-09-03 22:07:39 +0800 | [diff] [blame] | 2733 | trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 2734 | } |
| 2735 | |
| 2736 | static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) |
| 2737 | { |
Christian Borntraeger | 6b6de68 | 2016-02-09 13:47:55 +0100 | [diff] [blame] | 2738 | unsigned int old, val, shrink; |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 2739 | |
Wanpeng Li | 2cbd782 | 2015-09-03 22:07:39 +0800 | [diff] [blame] | 2740 | old = val = vcpu->halt_poll_ns; |
Christian Borntraeger | 6b6de68 | 2016-02-09 13:47:55 +0100 | [diff] [blame] | 2741 | shrink = READ_ONCE(halt_poll_ns_shrink); |
| 2742 | if (shrink == 0) |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 2743 | val = 0; |
| 2744 | else |
Christian Borntraeger | 6b6de68 | 2016-02-09 13:47:55 +0100 | [diff] [blame] | 2745 | val /= shrink; |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 2746 | |
| 2747 | vcpu->halt_poll_ns = val; |
Wanpeng Li | 2cbd782 | 2015-09-03 22:07:39 +0800 | [diff] [blame] | 2748 | trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 2749 | } |
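/*
 * Editor's note, a worked example assuming the usual module-parameter
 * defaults (halt_poll_ns_grow = 2, halt_poll_ns_grow_start = 10000,
 * halt_poll_ns_shrink = 0): repeated short blocks grow vcpu->halt_poll_ns
 * as 0 -> 10000 -> 20000 -> 40000 -> ..., capped at halt_poll_ns, while a
 * single long block with shrink == 0 resets it straight to 0.
 */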
| 2750 | |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2751 | static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) |
| 2752 | { |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 2753 | int ret = -EINTR; |
| 2754 | int idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 2755 | |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2756 | if (kvm_arch_vcpu_runnable(vcpu)) { |
| 2757 | kvm_make_request(KVM_REQ_UNHALT, vcpu); |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 2758 | goto out; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2759 | } |
| 2760 | if (kvm_cpu_has_pending_timer(vcpu)) |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 2761 | goto out; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2762 | if (signal_pending(current)) |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 2763 | goto out; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2764 | |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 2765 | ret = 0; |
| 2766 | out: |
| 2767 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 2768 | return ret; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2769 | } |
| 2770 | |
David Matlack | cb95312 | 2020-05-08 11:22:40 -0700 | [diff] [blame] | 2771 | static inline void |
| 2772 | update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited) |
| 2773 | { |
| 2774 | if (waited) |
| 2775 | vcpu->stat.halt_poll_fail_ns += poll_ns; |
| 2776 | else |
| 2777 | vcpu->stat.halt_poll_success_ns += poll_ns; |
| 2778 | } |
| 2779 | |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 2780 | /* |
| 2781 | * The vCPU has executed a HLT instruction with in-kernel mode enabled. |
| 2782 | */ |
Hollis Blanchard | 8776e51 | 2007-10-31 17:24:24 -0500 | [diff] [blame] | 2783 | void kvm_vcpu_block(struct kvm_vcpu *vcpu) |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 2784 | { |
David Matlack | cb95312 | 2020-05-08 11:22:40 -0700 | [diff] [blame] | 2785 | ktime_t start, cur, poll_end; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2786 | bool waited = false; |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 2787 | u64 block_ns; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2788 | |
Marc Zyngier | 07ab0f8 | 2019-08-02 11:37:09 +0100 | [diff] [blame] | 2789 | kvm_arch_vcpu_blocking(vcpu); |
| 2790 | |
David Matlack | cb95312 | 2020-05-08 11:22:40 -0700 | [diff] [blame] | 2791 | start = cur = poll_end = ktime_get(); |
Christian Borntraeger | cdd6ad3 | 2019-03-05 05:30:01 -0500 | [diff] [blame] | 2792 | if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) { |
Wanpeng Li | 19020f8 | 2015-09-03 22:07:37 +0800 | [diff] [blame] | 2793 | ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); |
Xiubo Li | f95ef0cd | 2015-02-26 14:58:23 +0800 | [diff] [blame] | 2794 | |
Paolo Bonzini | 62bea5b | 2015-09-15 18:27:57 +0200 | [diff] [blame] | 2795 | ++vcpu->stat.halt_attempted_poll; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2796 | do { |
| 2797 | /* |
| 2798 | * This sets KVM_REQ_UNHALT if an interrupt |
| 2799 | * arrives. |
| 2800 | */ |
| 2801 | if (kvm_vcpu_check_block(vcpu) < 0) { |
| 2802 | ++vcpu->stat.halt_successful_poll; |
Christian Borntraeger | 3491caf | 2016-05-13 12:16:35 +0200 | [diff] [blame] | 2803 | if (!vcpu_valid_wakeup(vcpu)) |
| 2804 | ++vcpu->stat.halt_poll_invalid; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2805 | goto out; |
| 2806 | } |
David Matlack | cb95312 | 2020-05-08 11:22:40 -0700 | [diff] [blame] | 2807 | poll_end = cur = ktime_get(); |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2808 | } while (single_task_running() && ktime_before(cur, stop)); |
| 2809 | } |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 2810 | |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 2811 | prepare_to_rcuwait(&vcpu->wait); |
Marcelo Tosatti | e5c239c | 2008-05-08 19:47:01 -0300 | [diff] [blame] | 2812 | for (;;) { |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 2813 | set_current_state(TASK_INTERRUPTIBLE); |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 2814 | |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2815 | if (kvm_vcpu_check_block(vcpu) < 0) |
Marcelo Tosatti | e5c239c | 2008-05-08 19:47:01 -0300 | [diff] [blame] | 2816 | break; |
| 2817 | |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2818 | waited = true; |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 2819 | schedule(); |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 2820 | } |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 2821 | finish_rcuwait(&vcpu->wait); |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2822 | cur = ktime_get(); |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 2823 | out: |
Marc Zyngier | 07ab0f8 | 2019-08-02 11:37:09 +0100 | [diff] [blame] | 2824 | kvm_arch_vcpu_unblocking(vcpu); |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 2825 | block_ns = ktime_to_ns(cur) - ktime_to_ns(start); |
| 2826 | |
David Matlack | cb95312 | 2020-05-08 11:22:40 -0700 | [diff] [blame] | 2827 | update_halt_poll_stats( |
| 2828 | vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited); |
| 2829 | |
Wanpeng Li | 44551b2 | 2019-09-29 09:06:56 +0800 | [diff] [blame] | 2830 | if (!kvm_arch_no_poll(vcpu)) { |
| 2831 | if (!vcpu_valid_wakeup(vcpu)) { |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 2832 | shrink_halt_poll_ns(vcpu); |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 2833 | } else if (vcpu->kvm->max_halt_poll_ns) { |
Wanpeng Li | 44551b2 | 2019-09-29 09:06:56 +0800 | [diff] [blame] | 2834 | if (block_ns <= vcpu->halt_poll_ns) |
| 2835 | ; |
| 2836 | /* we had a long block, shrink polling */ |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 2837 | else if (vcpu->halt_poll_ns && |
| 2838 | block_ns > vcpu->kvm->max_halt_poll_ns) |
Wanpeng Li | 44551b2 | 2019-09-29 09:06:56 +0800 | [diff] [blame] | 2839 | shrink_halt_poll_ns(vcpu); |
| 2840 | /* we had a short halt and our poll time is too small */ |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 2841 | else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns && |
| 2842 | block_ns < vcpu->kvm->max_halt_poll_ns) |
Wanpeng Li | 44551b2 | 2019-09-29 09:06:56 +0800 | [diff] [blame] | 2843 | grow_halt_poll_ns(vcpu); |
| 2844 | } else { |
| 2845 | vcpu->halt_poll_ns = 0; |
| 2846 | } |
| 2847 | } |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 2848 | |
Christian Borntraeger | 3491caf | 2016-05-13 12:16:35 +0200 | [diff] [blame] | 2849 | trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu)); |
| 2850 | kvm_arch_vcpu_block_finish(vcpu); |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 2851 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2852 | EXPORT_SYMBOL_GPL(kvm_vcpu_block); |
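/*
 * Editor's sketch: roughly how an arch HLT/WFI handler drives
 * kvm_vcpu_block(). Simplified; real handlers also update mp_state in
 * addition to consuming KVM_REQ_UNHALT as shown.
 */
static int example_handle_halt(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_block(vcpu);		/* poll, then sleep until runnable */
	kvm_check_request(KVM_REQ_UNHALT, vcpu);
	return 1;			/* resume guest execution */
}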
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 2853 | |
Radim Krčmář | 178f02f | 2017-04-26 22:32:26 +0200 | [diff] [blame] | 2854 | bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 2855 | { |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 2856 | struct rcuwait *waitp; |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 2857 | |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 2858 | waitp = kvm_arch_vcpu_get_wait(vcpu); |
| 2859 | if (rcuwait_wake_up(waitp)) { |
Wanpeng Li | d73eb57 | 2019-07-18 19:39:06 +0800 | [diff] [blame] | 2860 | WRITE_ONCE(vcpu->ready, true); |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 2861 | ++vcpu->stat.halt_wakeup; |
Radim Krčmář | 178f02f | 2017-04-26 22:32:26 +0200 | [diff] [blame] | 2862 | return true; |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 2863 | } |
| 2864 | |
Radim Krčmář | 178f02f | 2017-04-26 22:32:26 +0200 | [diff] [blame] | 2865 | return false; |
Radim Krčmář | dd1a4cc | 2016-05-04 14:09:44 -0500 | [diff] [blame] | 2866 | } |
| 2867 | EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); |
| 2868 | |
Paolo Bonzini | 0266c89 | 2017-05-04 15:14:13 +0200 | [diff] [blame] | 2869 | #ifndef CONFIG_S390 |
Radim Krčmář | dd1a4cc | 2016-05-04 14:09:44 -0500 | [diff] [blame] | 2870 | /* |
| 2871 | * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. |
| 2872 | */ |
| 2873 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu) |
| 2874 | { |
| 2875 | int me; |
| 2876 | int cpu = vcpu->cpu; |
| 2877 | |
Radim Krčmář | 178f02f | 2017-04-26 22:32:26 +0200 | [diff] [blame] | 2878 | if (kvm_vcpu_wake_up(vcpu)) |
| 2879 | return; |
| 2880 | |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 2881 | me = get_cpu(); |
| 2882 | if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) |
| 2883 | if (kvm_arch_vcpu_should_kick(vcpu)) |
| 2884 | smp_send_reschedule(cpu); |
| 2885 | put_cpu(); |
| 2886 | } |
Yang Zhang | a20ed54 | 2013-04-11 19:25:15 +0800 | [diff] [blame] | 2887 | EXPORT_SYMBOL_GPL(kvm_vcpu_kick); |
Paolo Bonzini | 0266c89 | 2017-05-04 15:14:13 +0200 | [diff] [blame] | 2888 | #endif /* !CONFIG_S390 */ |
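/*
 * Editor's sketch: the canonical "request then kick" pattern that pairs
 * with kvm_vcpu_kick(). KVM_REQ_EXAMPLE stands in for an arch-specific
 * request (e.g. x86's KVM_REQ_EVENT), so this is illustrative only.
 */
#if 0
	kvm_make_request(KVM_REQ_EXAMPLE, vcpu); /* publish new state */
	kvm_vcpu_kick(vcpu);	/* wake a blocked vCPU or IPI a running one */
#endif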
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 2889 | |
Dan Carpenter | fa93384 | 2014-05-23 13:20:42 +0300 | [diff] [blame] | 2890 | int kvm_vcpu_yield_to(struct kvm_vcpu *target) |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 2891 | { |
| 2892 | struct pid *pid; |
| 2893 | struct task_struct *task = NULL; |
Dan Carpenter | fa93384 | 2014-05-23 13:20:42 +0300 | [diff] [blame] | 2894 | int ret = 0; |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 2895 | |
| 2896 | rcu_read_lock(); |
| 2897 | pid = rcu_dereference(target->pid); |
| 2898 | if (pid) |
Sam Bobroff | 27fbe64b | 2014-09-19 09:40:41 +1000 | [diff] [blame] | 2899 | task = get_pid_task(pid, PIDTYPE_PID); |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 2900 | rcu_read_unlock(); |
| 2901 | if (!task) |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 2902 | return ret; |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 2903 | ret = yield_to(task, 1); |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 2904 | put_task_struct(task); |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 2905 | |
| 2906 | return ret; |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 2907 | } |
| 2908 | EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); |
| 2909 | |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 2910 | /* |
| 2911 | * Helper that checks whether a VCPU is eligible for directed yield. |
| 2912 | * The most eligible candidate to yield to is chosen by the following heuristics:
| 2913 | *
| 2914 | * (a) A VCPU which has not done a pl-exit or had cpu relax intercepted recently
| 2915 | * (a preempted lock holder), indicated by @in_spin_loop.
Fuad Tabba | 656012c | 2020-04-01 15:03:10 +0100 | [diff] [blame] | 2916 | * Set at the beginning and cleared at the end of the interception/PLE handler.
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 2917 | *
| 2918 | * (b) A VCPU which has done a pl-exit/cpu relax intercept but did not get a
| 2919 | * chance last time (it has most likely become eligible now, since we probably
| 2920 | * yielded to the lock holder in the last iteration). This is done by toggling
| 2921 | * @dy_eligible each time a VCPU is checked for eligibility.
| 2922 | *
| 2923 | * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
| 2924 | * to a preempted lock holder could result in wrong VCPU selection and CPU
| 2925 | * burning. Giving priority to a potential lock holder improves lock
| 2926 | * progress.
| 2927 | *
| 2928 | * Since the algorithm is based on heuristics, accessing another VCPU's data
| 2929 | * without locking does no harm. It may result in trying to yield to the same
| 2930 | * VCPU, failing, and continuing with the next VCPU, and so on.
| 2931 | */ |
Stephen Hemminger | 7940876 | 2013-12-29 12:12:29 -0800 | [diff] [blame] | 2932 | static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 2933 | { |
Scott Wood | 4a55dd7 | 2014-01-09 18:43:16 -0600 | [diff] [blame] | 2934 | #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 2935 | bool eligible; |
| 2936 | |
| 2937 | eligible = !vcpu->spin_loop.in_spin_loop || |
Christian Borntraeger | 3465611 | 2014-09-04 21:13:31 +0200 | [diff] [blame] | 2938 | vcpu->spin_loop.dy_eligible; |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 2939 | |
| 2940 | if (vcpu->spin_loop.in_spin_loop) |
| 2941 | kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); |
| 2942 | |
| 2943 | return eligible; |
Scott Wood | 4a55dd7 | 2014-01-09 18:43:16 -0600 | [diff] [blame] | 2944 | #else |
| 2945 | return true; |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 2946 | #endif |
Scott Wood | 4a55dd7 | 2014-01-09 18:43:16 -0600 | [diff] [blame] | 2947 | } |
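/*
 * Editor's note, a worked example of the toggle above: a vCPU X inside a
 * PLE handler starts with in_spin_loop = true, dy_eligible = false. The
 * first eligibility check returns false and flips dy_eligible to true, so
 * a later pass may yield to X. Once X leaves its own spin loop,
 * in_spin_loop = false makes it unconditionally eligible again.
 */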
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 2948 | |
Wanpeng Li | 17e433b | 2019-08-05 10:03:19 +0800 | [diff] [blame] | 2949 | /* |
| 2950 | * Unlike kvm_arch_vcpu_runnable, this function is called outside |
| 2951 | * a vcpu_load/vcpu_put pair. However, for most architectures |
| 2952 | * kvm_arch_vcpu_runnable does not require vcpu_load. |
| 2953 | */ |
| 2954 | bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) |
| 2955 | { |
| 2956 | return kvm_arch_vcpu_runnable(vcpu); |
| 2957 | } |
| 2958 | |
| 2959 | static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) |
| 2960 | { |
| 2961 | if (kvm_arch_dy_runnable(vcpu)) |
| 2962 | return true; |
| 2963 | |
| 2964 | #ifdef CONFIG_KVM_ASYNC_PF |
| 2965 | if (!list_empty_careful(&vcpu->async_pf.done)) |
| 2966 | return true; |
| 2967 | #endif |
| 2968 | |
| 2969 | return false; |
| 2970 | } |
| 2971 | |
Longpeng(Mike) | 199b576 | 2017-08-08 12:05:32 +0800 | [diff] [blame] | 2972 | void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) |
Zhai, Edwin | d255f4f | 2009-10-09 18:03:20 +0800 | [diff] [blame] | 2973 | { |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 2974 | struct kvm *kvm = me->kvm; |
| 2975 | struct kvm_vcpu *vcpu; |
| 2976 | int last_boosted_vcpu = me->kvm->last_boosted_vcpu; |
| 2977 | int yielded = 0; |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 2978 | int try = 3; |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 2979 | int pass; |
| 2980 | int i; |
Zhai, Edwin | d255f4f | 2009-10-09 18:03:20 +0800 | [diff] [blame] | 2981 | |
Raghavendra K T | 4c08849 | 2012-07-18 19:07:46 +0530 | [diff] [blame] | 2982 | kvm_vcpu_set_in_spin_loop(me, true); |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 2983 | /* |
| 2984 | * We boost the priority of a VCPU that is runnable but not |
| 2985 | * currently running, because it got preempted by something |
| 2986 | * else and called schedule in __vcpu_run. Hopefully that |
| 2987 | * VCPU is holding the lock that we need and will release it. |
| 2988 | * We approximate round-robin by starting at the last boosted VCPU. |
| 2989 | */ |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 2990 | for (pass = 0; pass < 2 && !yielded && try; pass++) { |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 2991 | kvm_for_each_vcpu(i, vcpu, kvm) { |
Rik van Riel | 5cfc2aa | 2012-06-19 16:51:04 -0400 | [diff] [blame] | 2992 | if (!pass && i <= last_boosted_vcpu) { |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 2993 | i = last_boosted_vcpu; |
| 2994 | continue; |
| 2995 | } else if (pass && i > last_boosted_vcpu) |
| 2996 | break; |
Wanpeng Li | d73eb57 | 2019-07-18 19:39:06 +0800 | [diff] [blame] | 2997 | if (!READ_ONCE(vcpu->ready)) |
Raghavendra K T | 7bc7ae2 | 2013-03-04 23:32:27 +0530 | [diff] [blame] | 2998 | continue; |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 2999 | if (vcpu == me) |
| 3000 | continue; |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 3001 | if (rcuwait_active(&vcpu->wait) && |
| 3002 | !vcpu_dy_runnable(vcpu)) |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3003 | continue; |
Wanpeng Li | 046ddee | 2019-08-01 11:30:14 +0800 | [diff] [blame] | 3004 | if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && |
| 3005 | !kvm_arch_vcpu_in_kernel(vcpu)) |
Longpeng(Mike) | 199b576 | 2017-08-08 12:05:32 +0800 | [diff] [blame] | 3006 | continue; |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3007 | if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) |
| 3008 | continue; |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3009 | |
| 3010 | yielded = kvm_vcpu_yield_to(vcpu); |
| 3011 | if (yielded > 0) { |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3012 | kvm->last_boosted_vcpu = i; |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3013 | break; |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3014 | } else if (yielded < 0) { |
| 3015 | try--; |
| 3016 | if (!try) |
| 3017 | break; |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3018 | } |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3019 | } |
| 3020 | } |
Raghavendra K T | 4c08849 | 2012-07-18 19:07:46 +0530 | [diff] [blame] | 3021 | kvm_vcpu_set_in_spin_loop(me, false); |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3022 | |
| 3023 | /* Ensure vcpu is not eligible during next spinloop */ |
| 3024 | kvm_vcpu_set_dy_eligible(me, false); |
Zhai, Edwin | d255f4f | 2009-10-09 18:03:20 +0800 | [diff] [blame] | 3025 | } |
| 3026 | EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); |
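/*
 * Editor's sketch: a pause-loop/cpu-relax exit handler, loosely modeled on
 * the x86 callers, which simply wraps kvm_vcpu_on_spin(); the in_spin_loop
 * bookkeeping happens inside it.
 */
static int example_handle_pause(struct kvm_vcpu *vcpu)
{
	/* Prefer vCPUs preempted in kernel mode; they likely hold a lock. */
	kvm_vcpu_on_spin(vcpu, true);
	return 1;			/* re-enter the guest */
}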
| 3027 | |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3028 | static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) |
| 3029 | { |
| 3030 | #if KVM_DIRTY_LOG_PAGE_OFFSET > 0 |
| 3031 | return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && |
| 3032 | (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + |
| 3033 | kvm->dirty_ring_size / PAGE_SIZE); |
| 3034 | #else |
| 3035 | return false; |
| 3036 | #endif |
| 3037 | } |
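/*
 * Editor's note, a worked example: with KVM_DIRTY_LOG_PAGE_OFFSET = 64
 * (the x86 value) and a 1 MiB dirty ring (256 pages of 4 KiB), page
 * offsets 64..319 of a vcpu-fd mmap belong to the ring; anything else
 * falls through to the other cases in kvm_vcpu_fault() below.
 */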
| 3038 | |
Souptick Joarder | 1499fa8 | 2018-04-19 00:49:58 +0530 | [diff] [blame] | 3039 | static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3040 | { |
Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 3041 | struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3042 | struct page *page; |
| 3043 | |
npiggin@suse.de | e4a533a | 2007-12-05 18:15:52 +1100 | [diff] [blame] | 3044 | if (vmf->pgoff == 0) |
Avi Kivity | 039576c | 2007-03-20 12:46:50 +0200 | [diff] [blame] | 3045 | page = virt_to_page(vcpu->run); |
Avi Kivity | 0956676 | 2008-01-23 18:14:23 +0200 | [diff] [blame] | 3046 | #ifdef CONFIG_X86 |
npiggin@suse.de | e4a533a | 2007-12-05 18:15:52 +1100 | [diff] [blame] | 3047 | else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 3048 | page = virt_to_page(vcpu->arch.pio_data); |
Avi Kivity | 0956676 | 2008-01-23 18:14:23 +0200 | [diff] [blame] | 3049 | #endif |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 3050 | #ifdef CONFIG_KVM_MMIO |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 3051 | else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) |
| 3052 | page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); |
| 3053 | #endif |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3054 | else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) |
| 3055 | page = kvm_dirty_ring_get_page( |
| 3056 | &vcpu->dirty_ring, |
| 3057 | vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET); |
Avi Kivity | 039576c | 2007-03-20 12:46:50 +0200 | [diff] [blame] | 3058 | else |
Carsten Otte | 5b1c149 | 2012-01-04 10:25:23 +0100 | [diff] [blame] | 3059 | return kvm_arch_vcpu_fault(vcpu, vmf); |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3060 | get_page(page); |
npiggin@suse.de | e4a533a | 2007-12-05 18:15:52 +1100 | [diff] [blame] | 3061 | vmf->page = page; |
| 3062 | return 0; |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3063 | } |
| 3064 | |
Alexey Dobriyan | f0f37e2f | 2009-09-27 22:29:37 +0400 | [diff] [blame] | 3065 | static const struct vm_operations_struct kvm_vcpu_vm_ops = { |
npiggin@suse.de | e4a533a | 2007-12-05 18:15:52 +1100 | [diff] [blame] | 3066 | .fault = kvm_vcpu_fault, |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3067 | }; |
| 3068 | |
| 3069 | static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) |
| 3070 | { |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3071 | struct kvm_vcpu *vcpu = file->private_data; |
| 3072 | unsigned long pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
| 3073 | |
| 3074 | if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || |
| 3075 | kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && |
| 3076 | ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED))) |
| 3077 | return -EINVAL; |
| 3078 | |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3079 | vma->vm_ops = &kvm_vcpu_vm_ops; |
| 3080 | return 0; |
| 3081 | } |
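/*
 * Editor's sketch of the userspace side (error handling elided): the fault
 * handler above backs an mmap() of the vcpu fd, which is how userspace
 * reaches struct kvm_run at page offset 0.
 */
#if 0	/* userspace code, shown here for illustration */
	int mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);
#endif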
| 3082 | |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3083 | static int kvm_vcpu_release(struct inode *inode, struct file *filp) |
| 3084 | { |
| 3085 | struct kvm_vcpu *vcpu = filp->private_data; |
| 3086 | |
Al Viro | 66c0b39 | 2008-04-19 20:33:56 +0100 | [diff] [blame] | 3087 | kvm_put_kvm(vcpu->kvm); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3088 | return 0; |
| 3089 | } |
| 3090 | |
Christian Borntraeger | 3d3aab1 | 2008-12-02 11:17:32 +0100 | [diff] [blame] | 3091 | static struct file_operations kvm_vcpu_fops = { |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3092 | .release = kvm_vcpu_release, |
| 3093 | .unlocked_ioctl = kvm_vcpu_ioctl, |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3094 | .mmap = kvm_vcpu_mmap, |
Arnd Bergmann | 6038f37 | 2010-08-15 18:52:59 +0200 | [diff] [blame] | 3095 | .llseek = noop_llseek, |
Marc Zyngier | 7ddfd3e | 2018-06-17 10:16:21 +0100 | [diff] [blame] | 3096 | KVM_COMPAT(kvm_vcpu_compat_ioctl), |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3097 | }; |
| 3098 | |
| 3099 | /* |
| 3100 | * Allocates an inode for the vcpu. |
| 3101 | */ |
| 3102 | static int create_vcpu_fd(struct kvm_vcpu *vcpu) |
| 3103 | { |
Masatake YAMATO | e46b469 | 2018-01-20 04:04:22 +0900 | [diff] [blame] | 3104 | char name[8 + 1 + ITOA_MAX_LEN + 1]; |
| 3105 | |
| 3106 | snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); |
| 3107 | return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3108 | } |
| 3109 | |
Greg KH | 3e7093d | 2019-07-31 20:56:20 +0200 | [diff] [blame] | 3110 | static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3111 | { |
Paolo Bonzini | 741cbba | 2019-08-03 08:14:25 +0200 | [diff] [blame] | 3112 | #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS |
Paolo Bonzini | d56f513 | 2020-06-04 15:16:52 +0200 | [diff] [blame] | 3113 | struct dentry *debugfs_dentry; |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3114 | char dir_name[ITOA_MAX_LEN * 2]; |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3115 | |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3116 | if (!debugfs_initialized()) |
Greg KH | 3e7093d | 2019-07-31 20:56:20 +0200 | [diff] [blame] | 3117 | return; |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3118 | |
| 3119 | snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); |
Paolo Bonzini | d56f513 | 2020-06-04 15:16:52 +0200 | [diff] [blame] | 3120 | debugfs_dentry = debugfs_create_dir(dir_name, |
| 3121 | vcpu->kvm->debugfs_dentry); |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3122 | |
Paolo Bonzini | d56f513 | 2020-06-04 15:16:52 +0200 | [diff] [blame] | 3123 | kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); |
Paolo Bonzini | 741cbba | 2019-08-03 08:14:25 +0200 | [diff] [blame] | 3124 | #endif |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3125 | } |
| 3126 | |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3127 | /* |
| 3128 | * Creates some virtual cpus. Good luck creating more than one. |
| 3129 | */ |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3130 | static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3131 | { |
| 3132 | int r; |
David Hildenbrand | e09fefd | 2015-11-05 09:03:50 +0100 | [diff] [blame] | 3133 | struct kvm_vcpu *vcpu; |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3134 | struct page *page; |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3135 | |
Greg Kurz | 0b1b1df | 2016-05-09 18:13:37 +0200 | [diff] [blame] | 3136 | if (id >= KVM_MAX_VCPU_ID) |
Andy Honig | 338c7db | 2013-11-18 16:09:22 -0800 | [diff] [blame] | 3137 | return -EINVAL; |
| 3138 | |
Paolo Bonzini | 6c7caeb | 2016-06-13 14:48:25 +0200 | [diff] [blame] | 3139 | mutex_lock(&kvm->lock); |
| 3140 | if (kvm->created_vcpus == KVM_MAX_VCPUS) { |
| 3141 | mutex_unlock(&kvm->lock); |
| 3142 | return -EINVAL; |
| 3143 | } |
| 3144 | |
| 3145 | kvm->created_vcpus++; |
| 3146 | mutex_unlock(&kvm->lock); |
| 3147 | |
Sean Christopherson | 897cc38 | 2019-12-18 13:55:09 -0800 | [diff] [blame] | 3148 | r = kvm_arch_vcpu_precreate(kvm, id); |
| 3149 | if (r) |
| 3150 | goto vcpu_decrement; |
| 3151 | |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3152 | vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); |
| 3153 | if (!vcpu) { |
| 3154 | r = -ENOMEM; |
Paolo Bonzini | 6c7caeb | 2016-06-13 14:48:25 +0200 | [diff] [blame] | 3155 | goto vcpu_decrement; |
| 3156 | } |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3157 | |
Peter Xu | fcd97ad | 2020-01-09 09:57:12 -0500 | [diff] [blame] | 3158 | BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE); |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3159 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); |
| 3160 | if (!page) { |
| 3161 | r = -ENOMEM; |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3162 | goto vcpu_free; |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3163 | } |
| 3164 | vcpu->run = page_address(page); |
| 3165 | |
| 3166 | kvm_vcpu_init(vcpu, kvm, id); |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3167 | |
| 3168 | r = kvm_arch_vcpu_create(vcpu); |
| 3169 | if (r) |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3170 | goto vcpu_free_run_page; |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3171 | |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3172 | if (kvm->dirty_ring_size) { |
| 3173 | r = kvm_dirty_ring_alloc(&vcpu->dirty_ring, |
| 3174 | id, kvm->dirty_ring_size); |
| 3175 | if (r) |
| 3176 | goto arch_vcpu_destroy; |
| 3177 | } |
| 3178 | |
Shaohua Li | 11ec280 | 2007-07-23 14:51:37 +0800 | [diff] [blame] | 3179 | mutex_lock(&kvm->lock); |
David Hildenbrand | e09fefd | 2015-11-05 09:03:50 +0100 | [diff] [blame] | 3180 | if (kvm_get_vcpu_by_id(kvm, id)) { |
| 3181 | r = -EEXIST; |
| 3182 | goto unlock_vcpu_destroy; |
| 3183 | } |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3184 | |
Radim Krčmář | 8750e72 | 2019-11-07 07:53:42 -0500 | [diff] [blame] | 3185 | vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); |
| 3186 | BUG_ON(kvm->vcpus[vcpu->vcpu_idx]); |
Rusty Russell | fb3f0f5 | 2007-07-27 17:16:56 +1000 | [diff] [blame] | 3187 | |
| 3188 | /* Now it's all set up, let userspace reach it */ |
Al Viro | 66c0b39 | 2008-04-19 20:33:56 +0100 | [diff] [blame] | 3189 | kvm_get_kvm(kvm); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3190 | r = create_vcpu_fd(vcpu); |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3191 | if (r < 0) { |
Sean Christopherson | 149487b | 2019-10-21 15:58:42 -0700 | [diff] [blame] | 3192 | kvm_put_kvm_no_destroy(kvm); |
Jan Kiszka | d780592 | 2011-05-23 10:33:05 +0200 | [diff] [blame] | 3193 | goto unlock_vcpu_destroy; |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3194 | } |
| 3195 | |
Radim Krčmář | 8750e72 | 2019-11-07 07:53:42 -0500 | [diff] [blame] | 3196 | kvm->vcpus[vcpu->vcpu_idx] = vcpu; |
Paolo Bonzini | dd48924 | 2015-07-29 11:32:20 +0200 | [diff] [blame] | 3197 | |
| 3198 | /* |
| 3199 | * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus |
| 3200 | * before the incremented value of kvm->online_vcpus.
| 3201 | */ |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3202 | smp_wmb(); |
| 3203 | atomic_inc(&kvm->online_vcpus); |
| 3204 | |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3205 | mutex_unlock(&kvm->lock); |
Marcelo Tosatti | 42897d8 | 2012-11-27 23:29:02 -0200 | [diff] [blame] | 3206 | kvm_arch_vcpu_postcreate(vcpu); |
Paolo Bonzini | 63d0434 | 2020-04-01 00:42:22 +0200 | [diff] [blame] | 3207 | kvm_create_vcpu_debugfs(vcpu); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3208 | return r; |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3209 | |
Jan Kiszka | d780592 | 2011-05-23 10:33:05 +0200 | [diff] [blame] | 3210 | unlock_vcpu_destroy: |
Glauber Costa | 7d8fece | 2008-09-17 23:16:59 -0300 | [diff] [blame] | 3211 | mutex_unlock(&kvm->lock); |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3212 | kvm_dirty_ring_free(&vcpu->dirty_ring); |
| 3213 | arch_vcpu_destroy: |
Hollis Blanchard | d40ccc6 | 2007-11-19 14:04:43 -0600 | [diff] [blame] | 3214 | kvm_arch_vcpu_destroy(vcpu); |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3215 | vcpu_free_run_page: |
| 3216 | free_page((unsigned long)vcpu->run); |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3217 | vcpu_free: |
| 3218 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
Paolo Bonzini | 6c7caeb | 2016-06-13 14:48:25 +0200 | [diff] [blame] | 3219 | vcpu_decrement: |
| 3220 | mutex_lock(&kvm->lock); |
| 3221 | kvm->created_vcpus--; |
| 3222 | mutex_unlock(&kvm->lock); |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3223 | return r; |
| 3224 | } |
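/*
 * Editor's sketch of the userspace side: KVM_CREATE_VCPU on a VM fd lands
 * in the function above and returns the new vcpu fd.
 */
#if 0	/* userspace code, shown here for illustration */
	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0 /* vcpu id */);
#endif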
| 3225 | |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 3226 | static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) |
| 3227 | { |
| 3228 | if (sigset) { |
| 3229 | sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
| 3230 | vcpu->sigset_active = 1; |
| 3231 | vcpu->sigset = *sigset; |
| 3232 | } else |
| 3233 | vcpu->sigset_active = 0; |
| 3234 | return 0; |
| 3235 | } |
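/*
 * Editor's sketch of the userspace side: KVM_SET_SIGNAL_MASK takes a
 * struct kvm_signal_mask whose len must be the kernel's sigset_t size
 * (8 bytes on x86-64), not glibc's. A mask of everything-but-SIGUSR1
 * lets SIGUSR1 break a vCPU out of KVM_RUN with -EINTR.
 */
#if 0	/* userspace code, shown here for illustration */
	struct kvm_signal_mask *km = malloc(sizeof(*km) + 8);
	sigset_t set;

	sigfillset(&set);
	sigdelset(&set, SIGUSR1);
	km->len = 8;
	memcpy(km->sigset, &set, 8);
	ioctl(vcpu_fd, KVM_SET_SIGNAL_MASK, km);
#endif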
| 3236 | |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3237 | static long kvm_vcpu_ioctl(struct file *filp, |
| 3238 | unsigned int ioctl, unsigned long arg) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3239 | { |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3240 | struct kvm_vcpu *vcpu = filp->private_data; |
Al Viro | 2f366987 | 2007-02-09 16:38:35 +0000 | [diff] [blame] | 3241 | void __user *argp = (void __user *)arg; |
Carsten Otte | 313a3dc | 2007-10-11 19:16:52 +0200 | [diff] [blame] | 3242 | int r; |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3243 | struct kvm_fpu *fpu = NULL; |
| 3244 | struct kvm_sregs *kvm_sregs = NULL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3245 | |
Avi Kivity | 6d4e4c4 | 2007-11-21 16:41:05 +0200 | [diff] [blame] | 3246 | if (vcpu->kvm->mm != current->mm) |
| 3247 | return -EIO; |
Avi Kivity | 2122ff5 | 2010-05-13 11:25:04 +0300 | [diff] [blame] | 3248 | |
David Matlack | 2ea75be | 2014-09-19 16:03:25 -0700 | [diff] [blame] | 3249 | if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) |
| 3250 | return -EINVAL; |
| 3251 | |
Avi Kivity | 2122ff5 | 2010-05-13 11:25:04 +0300 | [diff] [blame] | 3252 | /* |
Paolo Bonzini | 5cb0944 | 2017-12-12 17:41:34 +0100 | [diff] [blame] | 3253 | * Some architectures have vcpu ioctls that are asynchronous to vcpu |
| 3254 | * execution; mutex_lock() would break them. |
Avi Kivity | 2122ff5 | 2010-05-13 11:25:04 +0300 | [diff] [blame] | 3255 | */ |
Paolo Bonzini | 5cb0944 | 2017-12-12 17:41:34 +0100 | [diff] [blame] | 3256 | r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); |
| 3257 | if (r != -ENOIOCTLCMD) |
Michael S. Tsirkin | 9fc7744 | 2012-09-16 11:50:30 +0300 | [diff] [blame] | 3258 | return r; |
Avi Kivity | 2122ff5 | 2010-05-13 11:25:04 +0300 | [diff] [blame] | 3259 | |
Christoffer Dall | ec7660c | 2017-12-04 21:35:23 +0100 | [diff] [blame] | 3260 | if (mutex_lock_killable(&vcpu->mutex)) |
| 3261 | return -EINTR; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3262 | switch (ioctl) { |
Christian Borntraeger | 0e4524a | 2017-07-06 14:44:28 +0200 | [diff] [blame] | 3263 | case KVM_RUN: { |
| 3264 | struct pid *oldpid; |
Avi Kivity | f0fe510 | 2007-03-07 13:11:17 +0200 | [diff] [blame] | 3265 | r = -EINVAL; |
| 3266 | if (arg) |
| 3267 | goto out; |
Christian Borntraeger | 0e4524a | 2017-07-06 14:44:28 +0200 | [diff] [blame] | 3268 | oldpid = rcu_access_pointer(vcpu->pid); |
Eric W. Biederman | 71dbc8a | 2017-07-16 21:39:32 -0500 | [diff] [blame] | 3269 | if (unlikely(oldpid != task_pid(current))) { |
Christian Borntraeger | 7a72f7a | 2014-08-05 16:44:14 +0200 | [diff] [blame] | 3270 | /* The thread running this VCPU changed. */ |
Christoffer Dall | bd2a639 | 2018-02-23 17:23:57 +0100 | [diff] [blame] | 3271 | struct pid *newpid; |
Xiubo Li | f95ef0cd | 2015-02-26 14:58:23 +0800 | [diff] [blame] | 3272 | |
Christoffer Dall | bd2a639 | 2018-02-23 17:23:57 +0100 | [diff] [blame] | 3273 | r = kvm_arch_vcpu_run_pid_change(vcpu); |
| 3274 | if (r) |
| 3275 | break; |
| 3276 | |
| 3277 | newpid = get_task_pid(current, PIDTYPE_PID); |
Christian Borntraeger | 7a72f7a | 2014-08-05 16:44:14 +0200 | [diff] [blame] | 3278 | rcu_assign_pointer(vcpu->pid, newpid); |
| 3279 | if (oldpid) |
| 3280 | synchronize_rcu(); |
| 3281 | put_pid(oldpid); |
| 3282 | } |
Tianjia Zhang | 1b94f6f | 2020-04-16 13:10:57 +0800 | [diff] [blame] | 3283 | r = kvm_arch_vcpu_ioctl_run(vcpu); |
Gleb Natapov | 64be500 | 2010-10-24 16:49:08 +0200 | [diff] [blame] | 3284 | trace_kvm_userspace_exit(vcpu->run->exit_reason, r); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3285 | break; |
Christian Borntraeger | 0e4524a | 2017-07-06 14:44:28 +0200 | [diff] [blame] | 3286 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3287 | case KVM_GET_REGS: { |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3288 | struct kvm_regs *kvm_regs; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3289 | |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3290 | r = -ENOMEM; |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 3291 | kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT); |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3292 | if (!kvm_regs) |
| 3293 | goto out; |
| 3294 | r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3295 | if (r) |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3296 | goto out_free1; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3297 | r = -EFAULT; |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3298 | if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) |
| 3299 | goto out_free1; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3300 | r = 0; |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3301 | out_free1: |
| 3302 | kfree(kvm_regs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3303 | break; |
| 3304 | } |
| 3305 | case KVM_SET_REGS: { |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3306 | struct kvm_regs *kvm_regs; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3307 | |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3308 | kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); |
| 3309 | if (IS_ERR(kvm_regs)) { |
| 3310 | r = PTR_ERR(kvm_regs); |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3311 | goto out; |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3312 | } |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3313 | r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3314 | kfree(kvm_regs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3315 | break; |
| 3316 | } |
| 3317 | case KVM_GET_SREGS: { |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 3318 | kvm_sregs = kzalloc(sizeof(struct kvm_sregs), |
| 3319 | GFP_KERNEL_ACCOUNT); |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3320 | r = -ENOMEM; |
| 3321 | if (!kvm_sregs) |
| 3322 | goto out; |
| 3323 | r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3324 | if (r) |
| 3325 | goto out; |
| 3326 | r = -EFAULT; |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3327 | if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3328 | goto out; |
| 3329 | r = 0; |
| 3330 | break; |
| 3331 | } |
| 3332 | case KVM_SET_SREGS: { |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3333 | kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); |
| 3334 | if (IS_ERR(kvm_sregs)) { |
| 3335 | r = PTR_ERR(kvm_sregs); |
Guo Chao | 1859541 | 2012-11-02 18:33:21 +0800 | [diff] [blame] | 3336 | kvm_sregs = NULL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3337 | goto out; |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3338 | } |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3339 | r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3340 | break; |
| 3341 | } |
Marcelo Tosatti | 62d9f0d | 2008-04-11 13:24:45 -0300 | [diff] [blame] | 3342 | case KVM_GET_MP_STATE: { |
| 3343 | struct kvm_mp_state mp_state; |
| 3344 | |
| 3345 | r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); |
| 3346 | if (r) |
| 3347 | goto out; |
| 3348 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3349 | if (copy_to_user(argp, &mp_state, sizeof(mp_state))) |
Marcelo Tosatti | 62d9f0d | 2008-04-11 13:24:45 -0300 | [diff] [blame] | 3350 | goto out; |
| 3351 | r = 0; |
| 3352 | break; |
| 3353 | } |
| 3354 | case KVM_SET_MP_STATE: { |
| 3355 | struct kvm_mp_state mp_state; |
| 3356 | |
| 3357 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3358 | if (copy_from_user(&mp_state, argp, sizeof(mp_state))) |
Marcelo Tosatti | 62d9f0d | 2008-04-11 13:24:45 -0300 | [diff] [blame] | 3359 | goto out; |
| 3360 | r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); |
Marcelo Tosatti | 62d9f0d | 2008-04-11 13:24:45 -0300 | [diff] [blame] | 3361 | break; |
| 3362 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3363 | case KVM_TRANSLATE: { |
| 3364 | struct kvm_translation tr; |
| 3365 | |
| 3366 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3367 | if (copy_from_user(&tr, argp, sizeof(tr))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3368 | goto out; |
Zhang Xiantao | 8b00679 | 2007-11-16 13:05:55 +0800 | [diff] [blame] | 3369 | r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3370 | if (r) |
| 3371 | goto out; |
| 3372 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3373 | if (copy_to_user(argp, &tr, sizeof(tr))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3374 | goto out; |
| 3375 | r = 0; |
| 3376 | break; |
| 3377 | } |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 3378 | case KVM_SET_GUEST_DEBUG: { |
| 3379 | struct kvm_guest_debug dbg; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3380 | |
| 3381 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3382 | if (copy_from_user(&dbg, argp, sizeof(dbg))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3383 | goto out; |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 3384 | r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3385 | break; |
| 3386 | } |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 3387 | case KVM_SET_SIGNAL_MASK: { |
| 3388 | struct kvm_signal_mask __user *sigmask_arg = argp; |
| 3389 | struct kvm_signal_mask kvm_sigmask; |
| 3390 | sigset_t sigset, *p; |
| 3391 | |
| 3392 | p = NULL; |
| 3393 | if (argp) { |
| 3394 | r = -EFAULT; |
| 3395 | if (copy_from_user(&kvm_sigmask, argp, |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3396 | sizeof(kvm_sigmask))) |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 3397 | goto out; |
| 3398 | r = -EINVAL; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3399 | if (kvm_sigmask.len != sizeof(sigset)) |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 3400 | goto out; |
| 3401 | r = -EFAULT; |
| 3402 | if (copy_from_user(&sigset, sigmask_arg->sigset, |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3403 | sizeof(sigset))) |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 3404 | goto out; |
| 3405 | p = &sigset; |
| 3406 | } |
Andi Kleen | 376d41f | 2010-06-10 13:10:47 +0200 | [diff] [blame] | 3407 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 3408 | break; |
| 3409 | } |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 3410 | case KVM_GET_FPU: { |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 3411 | fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT); |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3412 | r = -ENOMEM; |
| 3413 | if (!fpu) |
| 3414 | goto out; |
| 3415 | r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 3416 | if (r) |
| 3417 | goto out; |
| 3418 | r = -EFAULT; |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3419 | if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 3420 | goto out; |
| 3421 | r = 0; |
| 3422 | break; |
| 3423 | } |
| 3424 | case KVM_SET_FPU: { |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3425 | fpu = memdup_user(argp, sizeof(*fpu)); |
| 3426 | if (IS_ERR(fpu)) { |
| 3427 | r = PTR_ERR(fpu); |
Guo Chao | 1859541 | 2012-11-02 18:33:21 +0800 | [diff] [blame] | 3428 | fpu = NULL; |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 3429 | goto out; |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3430 | } |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3431 | r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 3432 | break; |
| 3433 | } |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3434 | default: |
Carsten Otte | 313a3dc | 2007-10-11 19:16:52 +0200 | [diff] [blame] | 3435 | r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3436 | } |
| 3437 | out: |
Christoffer Dall | ec7660c | 2017-12-04 21:35:23 +0100 | [diff] [blame] | 3438 | mutex_unlock(&vcpu->mutex); |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3439 | kfree(fpu); |
| 3440 | kfree(kvm_sregs); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3441 | return r; |
| 3442 | } |
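/*
 * Editor's sketch of the userspace side: the minimal KVM_RUN loop that
 * exercises the dispatch above ("run" is the mmap'ed struct kvm_run;
 * exit handling is elided).
 */
#if 0	/* userspace code, shown here for illustration */
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno == EINTR)
			continue;	/* interrupted by a signal */
		switch (run->exit_reason) {
		case KVM_EXIT_HLT:
			/* ... */
			break;
		case KVM_EXIT_IO:
			/* data sits at (char *)run + run->io.data_offset */
			break;
		}
	}
#endif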
| 3443 | |
Christian Borntraeger | de8e5d7 | 2015-02-03 09:35:15 +0100 | [diff] [blame] | 3444 | #ifdef CONFIG_KVM_COMPAT |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 3445 | static long kvm_vcpu_compat_ioctl(struct file *filp, |
| 3446 | unsigned int ioctl, unsigned long arg) |
| 3447 | { |
| 3448 | struct kvm_vcpu *vcpu = filp->private_data; |
| 3449 | void __user *argp = compat_ptr(arg); |
| 3450 | int r; |
| 3451 | |
| 3452 | if (vcpu->kvm->mm != current->mm) |
| 3453 | return -EIO; |
| 3454 | |
| 3455 | switch (ioctl) { |
| 3456 | case KVM_SET_SIGNAL_MASK: { |
| 3457 | struct kvm_signal_mask __user *sigmask_arg = argp; |
| 3458 | struct kvm_signal_mask kvm_sigmask; |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 3459 | sigset_t sigset; |
| 3460 | |
| 3461 | if (argp) { |
| 3462 | r = -EFAULT; |
| 3463 | if (copy_from_user(&kvm_sigmask, argp, |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3464 | sizeof(kvm_sigmask))) |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 3465 | goto out; |
| 3466 | r = -EINVAL; |
Al Viro | 3968cf6 | 2017-09-03 21:45:17 -0400 | [diff] [blame] | 3467 | if (kvm_sigmask.len != sizeof(compat_sigset_t)) |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 3468 | goto out; |
| 3469 | r = -EFAULT; |
Paolo Bonzini | 1393b4a | 2020-07-02 05:39:31 -0400 | [diff] [blame] | 3470 | if (get_compat_sigset(&sigset, |
| 3471 | (compat_sigset_t __user *)sigmask_arg->sigset)) |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 3472 | goto out; |
Alan Cox | 760a9a3 | 2012-08-22 14:34:11 +0100 | [diff] [blame] | 3473 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); |
| 3474 | } else |
| 3475 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 3476 | break; |
| 3477 | } |
| 3478 | default: |
| 3479 | r = kvm_vcpu_ioctl(filp, ioctl, arg); |
| 3480 | } |
| 3481 | |
| 3482 | out: |
| 3483 | return r; |
| 3484 | } |
| 3485 | #endif |
| 3486 | |
Cédric Le Goater | a1cd3f0 | 2019-04-18 12:39:36 +0200 | [diff] [blame] | 3487 | static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) |
| 3488 | { |
| 3489 | struct kvm_device *dev = filp->private_data; |
| 3490 | |
| 3491 | if (dev->ops->mmap) |
| 3492 | return dev->ops->mmap(dev, vma); |
| 3493 | |
| 3494 | return -ENODEV; |
| 3495 | } |
| 3496 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3497 | static int kvm_device_ioctl_attr(struct kvm_device *dev, |
| 3498 | int (*accessor)(struct kvm_device *dev, |
| 3499 | struct kvm_device_attr *attr), |
| 3500 | unsigned long arg) |
| 3501 | { |
| 3502 | struct kvm_device_attr attr; |
| 3503 | |
| 3504 | if (!accessor) |
| 3505 | return -EPERM; |
| 3506 | |
| 3507 | if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) |
| 3508 | return -EFAULT; |
| 3509 | |
| 3510 | return accessor(dev, &attr); |
| 3511 | } |
| 3512 | |
| 3513 | static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, |
| 3514 | unsigned long arg) |
| 3515 | { |
| 3516 | struct kvm_device *dev = filp->private_data; |
| 3517 | |
Sean Christopherson | ddba918 | 2019-02-15 12:48:39 -0800 | [diff] [blame] | 3518 | if (dev->kvm->mm != current->mm) |
| 3519 | return -EIO; |
| 3520 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3521 | switch (ioctl) { |
| 3522 | case KVM_SET_DEVICE_ATTR: |
| 3523 | return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); |
| 3524 | case KVM_GET_DEVICE_ATTR: |
| 3525 | return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); |
| 3526 | case KVM_HAS_DEVICE_ATTR: |
| 3527 | return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); |
| 3528 | default: |
| 3529 | if (dev->ops->ioctl) |
| 3530 | return dev->ops->ioctl(dev, ioctl, arg); |
| 3531 | |
| 3532 | return -ENOTTY; |
| 3533 | } |
| 3534 | } |
| 3535 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3536 | static int kvm_device_release(struct inode *inode, struct file *filp) |
| 3537 | { |
| 3538 | struct kvm_device *dev = filp->private_data; |
| 3539 | struct kvm *kvm = dev->kvm; |
| 3540 | |
Cédric Le Goater | 2bde9b3 | 2019-04-18 12:39:41 +0200 | [diff] [blame] | 3541 | if (dev->ops->release) { |
| 3542 | mutex_lock(&kvm->lock); |
| 3543 | list_del(&dev->vm_node); |
| 3544 | dev->ops->release(dev); |
| 3545 | mutex_unlock(&kvm->lock); |
| 3546 | } |
| 3547 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3548 | kvm_put_kvm(kvm); |
| 3549 | return 0; |
| 3550 | } |
| 3551 | |
| 3552 | static const struct file_operations kvm_device_fops = { |
| 3553 | .unlocked_ioctl = kvm_device_ioctl, |
| 3554 | .release = kvm_device_release, |
Marc Zyngier | 7ddfd3e | 2018-06-17 10:16:21 +0100 | [diff] [blame] | 3555 | KVM_COMPAT(kvm_device_ioctl), |
Cédric Le Goater | a1cd3f0 | 2019-04-18 12:39:36 +0200 | [diff] [blame] | 3556 | .mmap = kvm_device_mmap, |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3557 | }; |
| 3558 | |
| 3559 | struct kvm_device *kvm_device_from_filp(struct file *filp) |
| 3560 | { |
| 3561 | if (filp->f_op != &kvm_device_fops) |
| 3562 | return NULL; |
| 3563 | |
| 3564 | return filp->private_data; |
| 3565 | } |
| 3566 | |
Steven Price | 8538cb2 | 2019-10-21 16:28:19 +0100 | [diff] [blame] | 3567 | static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 3568 | #ifdef CONFIG_KVM_MPIC |
| 3569 | [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, |
| 3570 | [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, |
| 3571 | #endif |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 3572 | }; |
| 3573 | |
Steven Price | 8538cb2 | 2019-10-21 16:28:19 +0100 | [diff] [blame] | 3574 | int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type) |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 3575 | { |
| 3576 | if (type >= ARRAY_SIZE(kvm_device_ops_table)) |
| 3577 | return -ENOSPC; |
| 3578 | |
| 3579 | if (kvm_device_ops_table[type] != NULL) |
| 3580 | return -EEXIST; |
| 3581 | |
| 3582 | kvm_device_ops_table[type] = ops; |
| 3583 | return 0; |
| 3584 | } |
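/*
 * Editor's note: device classes register themselves at init time; for
 * instance the VFIO pseudo-device does, roughly:
 *
 *	kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
 */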
| 3585 | |
Wanpeng Li | 571ee1b | 2014-10-09 18:30:08 +0800 | [diff] [blame] | 3586 | void kvm_unregister_device_ops(u32 type) |
| 3587 | { |
| 3588 | if (kvm_device_ops_table[type] != NULL) |
| 3589 | kvm_device_ops_table[type] = NULL; |
| 3590 | } |
| 3591 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3592 | static int kvm_ioctl_create_device(struct kvm *kvm, |
| 3593 | struct kvm_create_device *cd) |
| 3594 | { |
Steven Price | 8538cb2 | 2019-10-21 16:28:19 +0100 | [diff] [blame] | 3595 | const struct kvm_device_ops *ops = NULL; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3596 | struct kvm_device *dev; |
| 3597 | bool test = cd->flags & KVM_CREATE_DEVICE_TEST; |
Paolo Bonzini | 1d487e9 | 2019-04-11 11:16:47 +0200 | [diff] [blame] | 3598 | int type; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3599 | int ret; |
| 3600 | |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 3601 | if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3602 | return -ENODEV; |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 3603 | |
Paolo Bonzini | 1d487e9 | 2019-04-11 11:16:47 +0200 | [diff] [blame] | 3604 | type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); |
| 3605 | ops = kvm_device_ops_table[type]; |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 3606 | if (ops == NULL) |
| 3607 | return -ENODEV; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3608 | |
| 3609 | if (test) |
| 3610 | return 0; |
| 3611 | |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 3612 | dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3613 | if (!dev) |
| 3614 | return -ENOMEM; |
| 3615 | |
| 3616 | dev->ops = ops; |
| 3617 | dev->kvm = kvm; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3618 | |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 3619 | mutex_lock(&kvm->lock); |
Paolo Bonzini | 1d487e9 | 2019-04-11 11:16:47 +0200 | [diff] [blame] | 3620 | ret = ops->create(dev, type); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3621 | if (ret < 0) { |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 3622 | mutex_unlock(&kvm->lock); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3623 | kfree(dev); |
| 3624 | return ret; |
| 3625 | } |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 3626 | list_add(&dev->vm_node, &kvm->devices); |
| 3627 | mutex_unlock(&kvm->lock); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3628 | |
Christoffer Dall | 023e9fd | 2016-08-09 19:13:00 +0200 | [diff] [blame] | 3629 | if (ops->init) |
| 3630 | ops->init(dev); |
| 3631 | |
Jann Horn | cfa3938 | 2019-01-26 01:54:33 +0100 | [diff] [blame] | 3632 | kvm_get_kvm(kvm); |
Yann Droneaud | 24009b0 | 2013-08-24 22:14:07 +0200 | [diff] [blame] | 3633 | ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3634 | if (ret < 0) { |
Sean Christopherson | 149487b | 2019-10-21 15:58:42 -0700 | [diff] [blame] | 3635 | kvm_put_kvm_no_destroy(kvm); |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 3636 | mutex_lock(&kvm->lock); |
| 3637 | list_del(&dev->vm_node); |
| 3638 | mutex_unlock(&kvm->lock); |
Dan Carpenter | a0f1d21 | 2016-11-30 22:21:05 +0300 | [diff] [blame] | 3639 | ops->destroy(dev); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3640 | return ret; |
| 3641 | } |
| 3642 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3643 | cd->fd = ret; |
| 3644 | return 0; |
| 3645 | } |
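| |
| | /*
| |  * Illustrative userspace usage, not part of the original file: the
| |  * KVM_CREATE_DEVICE_TEST flag exercises only the table lookup above, so
| |  * a VMM can probe for a device type without instantiating it:
| |  *
| |  *	struct kvm_create_device cd = {
| |  *		.type  = KVM_DEV_TYPE_VFIO,
| |  *		.flags = KVM_CREATE_DEVICE_TEST,
| |  *	};
| |  *	int supported = !ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
| |  *
| |  * A second call with .flags = 0 then creates the device for real and
| |  * returns its fd in cd.fd.
| |  */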
| 3646 | |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 3647 | static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) |
| 3648 | { |
| 3649 | switch (arg) { |
| 3650 | case KVM_CAP_USER_MEMORY: |
| 3651 | case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: |
| 3652 | case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 3653 | case KVM_CAP_INTERNAL_ERROR_DATA: |
| 3654 | #ifdef CONFIG_HAVE_KVM_MSI |
| 3655 | case KVM_CAP_SIGNAL_MSI: |
| 3656 | #endif |
Paul Mackerras | 297e210 | 2014-06-30 20:51:13 +1000 | [diff] [blame] | 3657 | #ifdef CONFIG_HAVE_KVM_IRQFD |
Paolo Bonzini | dc9be0f | 2015-03-05 11:54:46 +0100 | [diff] [blame] | 3658 | case KVM_CAP_IRQFD: |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 3659 | case KVM_CAP_IRQFD_RESAMPLE: |
| 3660 | #endif |
Jason Wang | e9ea506 | 2015-09-15 14:41:59 +0800 | [diff] [blame] | 3661 | case KVM_CAP_IOEVENTFD_ANY_LENGTH: |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 3662 | case KVM_CAP_CHECK_EXTENSION_VM: |
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 3663 | case KVM_CAP_ENABLE_CAP_VM: |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 3664 | case KVM_CAP_HALT_POLL: |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 3665 | return 1; |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 3666 | #ifdef CONFIG_KVM_MMIO |
Paolo Bonzini | 3042255 | 2017-03-31 13:53:22 +0200 | [diff] [blame] | 3667 | case KVM_CAP_COALESCED_MMIO: |
| 3668 | return KVM_COALESCED_MMIO_PAGE_OFFSET; |
Peng Hao | 0804c84 | 2018-10-14 07:09:55 +0800 | [diff] [blame] | 3669 | case KVM_CAP_COALESCED_PIO: |
| 3670 | return 1; |
Paolo Bonzini | 3042255 | 2017-03-31 13:53:22 +0200 | [diff] [blame] | 3671 | #endif |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 3672 | #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
| 3673 | case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: |
| 3674 | return KVM_DIRTY_LOG_MANUAL_CAPS; |
| 3675 | #endif |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 3676 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
| 3677 | case KVM_CAP_IRQ_ROUTING: |
| 3678 | return KVM_MAX_IRQ_ROUTES; |
| 3679 | #endif |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 3680 | #if KVM_ADDRESS_SPACE_NUM > 1 |
| 3681 | case KVM_CAP_MULTI_ADDRESS_SPACE: |
| 3682 | return KVM_ADDRESS_SPACE_NUM; |
| 3683 | #endif |
Paolo Bonzini | c110ae5 | 2019-03-28 17:24:03 +0100 | [diff] [blame] | 3684 | case KVM_CAP_NR_MEMSLOTS: |
| 3685 | return KVM_USER_MEM_SLOTS; |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3686 | case KVM_CAP_DIRTY_LOG_RING: |
| 3687 | #if KVM_DIRTY_LOG_PAGE_OFFSET > 0 |
| 3688 | return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); |
| 3689 | #else |
| 3690 | return 0; |
| 3691 | #endif |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 3692 | default: |
| 3693 | break; |
| 3694 | } |
| 3695 | return kvm_vm_ioctl_check_extension(kvm, arg); |
| 3696 | } |
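| |
| | /*
| |  * Illustrative sketch, not part of the original file: from userspace,
| |  * KVM_CHECK_EXTENSION returns 0/1 for boolean caps and a meaningful
| |  * value for the rest, matching the switch above:
| |  *
| |  *	int slots = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);
| |  *	int bytes = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
| |  *
| |  * Here bytes is the maximum dirty-ring size, or 0 when unsupported.
| |  */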
| 3697 | |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3698 | static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) |
| 3699 | { |
| 3700 | int r; |
| 3701 | |
| 3702 | if (!KVM_DIRTY_LOG_PAGE_OFFSET) |
| 3703 | return -EINVAL; |
| 3704 | |
| 3705 | /* the size should be a power of 2 */
| 3706 | if (!size || (size & (size - 1))) |
| 3707 | return -EINVAL; |
| 3708 | |
| 3709 | /* Must be large enough to hold the reserved entries, and at least one page */
| 3710 | if (size < kvm_dirty_ring_get_rsvd_entries() * |
| 3711 | sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE) |
| 3712 | return -EINVAL; |
| 3713 | |
| 3714 | if (size > KVM_DIRTY_RING_MAX_ENTRIES * |
| 3715 | sizeof(struct kvm_dirty_gfn)) |
| 3716 | return -E2BIG; |
| 3717 | |
| 3718 | /* We only allow the size to be set once */
| 3719 | if (kvm->dirty_ring_size) |
| 3720 | return -EINVAL; |
| 3721 | |
| 3722 | mutex_lock(&kvm->lock); |
| 3723 | |
| 3724 | if (kvm->created_vcpus) { |
| 3725 | /* We don't allow changing this value after vCPUs are created */
| 3726 | r = -EINVAL; |
| 3727 | } else { |
| 3728 | kvm->dirty_ring_size = size; |
| 3729 | r = 0; |
| 3730 | } |
| 3731 | |
| 3732 | mutex_unlock(&kvm->lock); |
| 3733 | return r; |
| 3734 | } |
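| |
| | /*
| |  * Illustrative sketch, not part of the original file: a VMM enables the
| |  * ring before creating any vCPU, passing the per-vCPU ring size in
| |  * bytes; 4096 entries below is an arbitrary example value:
| |  *
| |  *	struct kvm_enable_cap cap = {
| |  *		.cap     = KVM_CAP_DIRTY_LOG_RING,
| |  *		.args[0] = 4096 * sizeof(struct kvm_dirty_gfn),
| |  *	};
| |  *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
| |  *
| |  * Any power-of-2 byte size between the reserved-entries/page minimum and
| |  * the KVM_DIRTY_RING_MAX_ENTRIES maximum checked above is accepted.
| |  */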
| 3735 | |
| 3736 | static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) |
| 3737 | { |
| 3738 | int i; |
| 3739 | struct kvm_vcpu *vcpu; |
| 3740 | int cleared = 0; |
| 3741 | |
| 3742 | if (!kvm->dirty_ring_size) |
| 3743 | return -EINVAL; |
| 3744 | |
| 3745 | mutex_lock(&kvm->slots_lock); |
| 3746 | |
| 3747 | kvm_for_each_vcpu(i, vcpu, kvm) |
| 3748 | cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring); |
| 3749 | |
| 3750 | mutex_unlock(&kvm->slots_lock); |
| 3751 | |
| 3752 | if (cleared) |
| 3753 | kvm_flush_remote_tlbs(kvm); |
| 3754 | |
| 3755 | return cleared; |
| 3756 | } |
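| |
| | /*
| |  * Illustrative flow, not part of the original file: after harvesting
| |  * dirty GFNs from the per-vCPU rings (mmap()ed at
| |  * KVM_DIRTY_LOG_PAGE_OFFSET on each vCPU fd), userspace reclaims the
| |  * harvested entries with:
| |  *
| |  *	int reclaimed = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
| |  */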
| 3757 | |
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 3758 | int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, |
| 3759 | struct kvm_enable_cap *cap) |
| 3760 | { |
| 3761 | return -EINVAL; |
| 3762 | } |
| 3763 | |
| 3764 | static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, |
| 3765 | struct kvm_enable_cap *cap) |
| 3766 | { |
| 3767 | switch (cap->cap) { |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 3768 | #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 3769 | case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: { |
| 3770 | u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE; |
| 3771 | |
| 3772 | if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE) |
| 3773 | allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS; |
| 3774 | |
| 3775 | if (cap->flags || (cap->args[0] & ~allowed_options)) |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 3776 | return -EINVAL; |
| 3777 | kvm->manual_dirty_log_protect = cap->args[0]; |
| 3778 | return 0; |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 3779 | } |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 3780 | #endif |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 3781 | case KVM_CAP_HALT_POLL: { |
| 3782 | if (cap->flags || cap->args[0] != (unsigned int)cap->args[0]) |
| 3783 | return -EINVAL; |
| 3784 | |
| 3785 | kvm->max_halt_poll_ns = cap->args[0]; |
| 3786 | return 0; |
| 3787 | } |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3788 | case KVM_CAP_DIRTY_LOG_RING: |
| 3789 | return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); |
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 3790 | default: |
| 3791 | return kvm_vm_ioctl_enable_cap(kvm, cap); |
| 3792 | } |
| 3793 | } |
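| |
| | /*
| |  * Illustrative sketch, not part of the original file: halt polling can
| |  * be capped per VM (0 disables it) without touching the module-wide
| |  * default:
| |  *
| |  *	struct kvm_enable_cap cap = {
| |  *		.cap     = KVM_CAP_HALT_POLL,
| |  *		.args[0] = 0,
| |  *	};
| |  *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
| |  *
| |  * args[0] is the maximum halt-polling time for this VM, in nanoseconds.
| |  */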
| 3794 | |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3795 | static long kvm_vm_ioctl(struct file *filp, |
| 3796 | unsigned int ioctl, unsigned long arg) |
| 3797 | { |
| 3798 | struct kvm *kvm = filp->private_data; |
| 3799 | void __user *argp = (void __user *)arg; |
Carsten Otte | 1fe779f | 2007-10-29 16:08:35 +0100 | [diff] [blame] | 3800 | int r; |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3801 | |
Avi Kivity | 6d4e4c4 | 2007-11-21 16:41:05 +0200 | [diff] [blame] | 3802 | if (kvm->mm != current->mm) |
| 3803 | return -EIO; |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3804 | switch (ioctl) { |
| 3805 | case KVM_CREATE_VCPU: |
| 3806 | r = kvm_vm_ioctl_create_vcpu(kvm, arg); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3807 | break; |
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 3808 | case KVM_ENABLE_CAP: { |
| 3809 | struct kvm_enable_cap cap; |
| 3810 | |
| 3811 | r = -EFAULT; |
| 3812 | if (copy_from_user(&cap, argp, sizeof(cap))) |
| 3813 | goto out; |
| 3814 | r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); |
| 3815 | break; |
| 3816 | } |
Izik Eidus | 6fc138d | 2007-10-09 19:20:39 +0200 | [diff] [blame] | 3817 | case KVM_SET_USER_MEMORY_REGION: { |
| 3818 | struct kvm_userspace_memory_region kvm_userspace_mem; |
| 3819 | |
| 3820 | r = -EFAULT; |
| 3821 | if (copy_from_user(&kvm_userspace_mem, argp, |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3822 | sizeof(kvm_userspace_mem))) |
Izik Eidus | 6fc138d | 2007-10-09 19:20:39 +0200 | [diff] [blame] | 3823 | goto out; |
| 3824 | |
Takuya Yoshikawa | 47ae31e | 2013-02-27 19:43:00 +0900 | [diff] [blame] | 3825 | r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3826 | break; |
| 3827 | } |
| 3828 | case KVM_GET_DIRTY_LOG: { |
| 3829 | struct kvm_dirty_log log; |
| 3830 | |
| 3831 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3832 | if (copy_from_user(&log, argp, sizeof(log))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3833 | goto out; |
Avi Kivity | 2c6f5df | 2007-02-20 18:27:58 +0200 | [diff] [blame] | 3834 | r = kvm_vm_ioctl_get_dirty_log(kvm, &log); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3835 | break; |
| 3836 | } |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 3837 | #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
| 3838 | case KVM_CLEAR_DIRTY_LOG: { |
| 3839 | struct kvm_clear_dirty_log log; |
| 3840 | |
| 3841 | r = -EFAULT; |
| 3842 | if (copy_from_user(&log, argp, sizeof(log))) |
| 3843 | goto out; |
| 3844 | r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); |
| 3845 | break; |
| 3846 | } |
| 3847 | #endif |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 3848 | #ifdef CONFIG_KVM_MMIO |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 3849 | case KVM_REGISTER_COALESCED_MMIO: { |
| 3850 | struct kvm_coalesced_mmio_zone zone; |
Xiubo Li | f95ef0cd | 2015-02-26 14:58:23 +0800 | [diff] [blame] | 3851 | |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 3852 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3853 | if (copy_from_user(&zone, argp, sizeof(zone))) |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 3854 | goto out; |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 3855 | r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 3856 | break; |
| 3857 | } |
| 3858 | case KVM_UNREGISTER_COALESCED_MMIO: { |
| 3859 | struct kvm_coalesced_mmio_zone zone; |
Xiubo Li | f95ef0cd | 2015-02-26 14:58:23 +0800 | [diff] [blame] | 3860 | |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 3861 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3862 | if (copy_from_user(&zone, argp, sizeof(zone))) |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 3863 | goto out; |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 3864 | r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 3865 | break; |
| 3866 | } |
| 3867 | #endif |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 3868 | case KVM_IRQFD: { |
| 3869 | struct kvm_irqfd data; |
| 3870 | |
| 3871 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3872 | if (copy_from_user(&data, argp, sizeof(data))) |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 3873 | goto out; |
Alex Williamson | d4db293 | 2012-06-29 09:56:08 -0600 | [diff] [blame] | 3874 | r = kvm_irqfd(kvm, &data); |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 3875 | break; |
| 3876 | } |
Gregory Haskins | d34e6b1 | 2009-07-07 17:08:49 -0400 | [diff] [blame] | 3877 | case KVM_IOEVENTFD: { |
| 3878 | struct kvm_ioeventfd data; |
| 3879 | |
| 3880 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3881 | if (copy_from_user(&data, argp, sizeof(data))) |
Gregory Haskins | d34e6b1 | 2009-07-07 17:08:49 -0400 | [diff] [blame] | 3882 | goto out; |
| 3883 | r = kvm_ioeventfd(kvm, &data); |
| 3884 | break; |
| 3885 | } |
Jan Kiszka | 07975ad | 2012-03-29 21:14:12 +0200 | [diff] [blame] | 3886 | #ifdef CONFIG_HAVE_KVM_MSI |
| 3887 | case KVM_SIGNAL_MSI: { |
| 3888 | struct kvm_msi msi; |
| 3889 | |
| 3890 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3891 | if (copy_from_user(&msi, argp, sizeof(msi))) |
Jan Kiszka | 07975ad | 2012-03-29 21:14:12 +0200 | [diff] [blame] | 3892 | goto out; |
| 3893 | r = kvm_send_userspace_msi(kvm, &msi); |
| 3894 | break; |
| 3895 | } |
| 3896 | #endif |
Christoffer Dall | 23d43cf | 2012-07-24 08:51:20 -0400 | [diff] [blame] | 3897 | #ifdef __KVM_HAVE_IRQ_LINE |
| 3898 | case KVM_IRQ_LINE_STATUS: |
| 3899 | case KVM_IRQ_LINE: { |
| 3900 | struct kvm_irq_level irq_event; |
| 3901 | |
| 3902 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3903 | if (copy_from_user(&irq_event, argp, sizeof(irq_event))) |
Christoffer Dall | 23d43cf | 2012-07-24 08:51:20 -0400 | [diff] [blame] | 3904 | goto out; |
| 3905 | |
Yang Zhang | aa2fbe6 | 2013-04-11 19:21:40 +0800 | [diff] [blame] | 3906 | r = kvm_vm_ioctl_irq_line(kvm, &irq_event, |
| 3907 | ioctl == KVM_IRQ_LINE_STATUS); |
Christoffer Dall | 23d43cf | 2012-07-24 08:51:20 -0400 | [diff] [blame] | 3908 | if (r) |
| 3909 | goto out; |
| 3910 | |
| 3911 | r = -EFAULT; |
| 3912 | if (ioctl == KVM_IRQ_LINE_STATUS) { |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3913 | if (copy_to_user(argp, &irq_event, sizeof(irq_event))) |
Christoffer Dall | 23d43cf | 2012-07-24 08:51:20 -0400 | [diff] [blame] | 3914 | goto out; |
| 3915 | } |
| 3916 | |
| 3917 | r = 0; |
| 3918 | break; |
| 3919 | } |
| 3920 | #endif |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 3921 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
| 3922 | case KVM_SET_GSI_ROUTING: { |
| 3923 | struct kvm_irq_routing routing; |
| 3924 | struct kvm_irq_routing __user *urouting; |
Paolo Bonzini | f8c1b85 | 2016-06-01 14:09:22 +0200 | [diff] [blame] | 3925 | struct kvm_irq_routing_entry *entries = NULL; |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 3926 | |
| 3927 | r = -EFAULT; |
| 3928 | if (copy_from_user(&routing, argp, sizeof(routing))) |
| 3929 | goto out; |
| 3930 | r = -EINVAL; |
David Hildenbrand | 5c0aea0 | 2017-04-28 17:06:20 +0200 | [diff] [blame] | 3931 | if (!kvm_arch_can_set_irq_routing(kvm)) |
| 3932 | goto out; |
Xiubo Li | caf1ff2 | 2016-06-15 18:00:33 +0800 | [diff] [blame] | 3933 | if (routing.nr > KVM_MAX_IRQ_ROUTES) |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 3934 | goto out; |
| 3935 | if (routing.flags) |
| 3936 | goto out; |
Paolo Bonzini | f8c1b85 | 2016-06-01 14:09:22 +0200 | [diff] [blame] | 3937 | if (routing.nr) { |
Paolo Bonzini | f8c1b85 | 2016-06-01 14:09:22 +0200 | [diff] [blame] | 3938 | urouting = argp; |
Denis Efremov | 7ec28e2 | 2020-06-03 13:11:31 +0300 | [diff] [blame] | 3939 | entries = vmemdup_user(urouting->entries, |
| 3940 | array_size(sizeof(*entries), |
| 3941 | routing.nr)); |
| 3942 | if (IS_ERR(entries)) { |
| 3943 | r = PTR_ERR(entries); |
| 3944 | goto out; |
| 3945 | } |
Paolo Bonzini | f8c1b85 | 2016-06-01 14:09:22 +0200 | [diff] [blame] | 3946 | } |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 3947 | r = kvm_set_irq_routing(kvm, entries, routing.nr, |
| 3948 | routing.flags); |
Denis Efremov | 7ec28e2 | 2020-06-03 13:11:31 +0300 | [diff] [blame] | 3949 | kvfree(entries); |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 3950 | break; |
| 3951 | } |
| 3952 | #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3953 | case KVM_CREATE_DEVICE: { |
| 3954 | struct kvm_create_device cd; |
| 3955 | |
| 3956 | r = -EFAULT; |
| 3957 | if (copy_from_user(&cd, argp, sizeof(cd))) |
| 3958 | goto out; |
| 3959 | |
| 3960 | r = kvm_ioctl_create_device(kvm, &cd); |
| 3961 | if (r) |
| 3962 | goto out; |
| 3963 | |
| 3964 | r = -EFAULT; |
| 3965 | if (copy_to_user(argp, &cd, sizeof(cd))) |
| 3966 | goto out; |
| 3967 | |
| 3968 | r = 0; |
| 3969 | break; |
| 3970 | } |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 3971 | case KVM_CHECK_EXTENSION: |
| 3972 | r = kvm_vm_ioctl_check_extension_generic(kvm, arg); |
| 3973 | break; |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3974 | case KVM_RESET_DIRTY_RINGS: |
| 3975 | r = kvm_vm_ioctl_reset_dirty_pages(kvm); |
| 3976 | break; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 3977 | default: |
Carsten Otte | 1fe779f | 2007-10-29 16:08:35 +0100 | [diff] [blame] | 3978 | r = kvm_arch_vm_ioctl(filp, ioctl, arg); |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 3979 | } |
| 3980 | out: |
| 3981 | return r; |
| 3982 | } |
| 3983 | |
Christian Borntraeger | de8e5d7 | 2015-02-03 09:35:15 +0100 | [diff] [blame] | 3984 | #ifdef CONFIG_KVM_COMPAT |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 3985 | struct compat_kvm_dirty_log { |
| 3986 | __u32 slot; |
| 3987 | __u32 padding1; |
| 3988 | union { |
| 3989 | compat_uptr_t dirty_bitmap; /* one bit per page */ |
| 3990 | __u64 padding2; |
| 3991 | }; |
| 3992 | }; |
| 3993 | |
| 3994 | static long kvm_vm_compat_ioctl(struct file *filp, |
| 3995 | unsigned int ioctl, unsigned long arg) |
| 3996 | { |
| 3997 | struct kvm *kvm = filp->private_data; |
| 3998 | int r; |
| 3999 | |
| 4000 | if (kvm->mm != current->mm) |
| 4001 | return -EIO; |
| 4002 | switch (ioctl) { |
| 4003 | case KVM_GET_DIRTY_LOG: { |
| 4004 | struct compat_kvm_dirty_log compat_log; |
| 4005 | struct kvm_dirty_log log; |
| 4006 | |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4007 | if (copy_from_user(&compat_log, (void __user *)arg, |
| 4008 | sizeof(compat_log))) |
Markus Elfring | f6a3b16 | 2017-01-22 11:30:21 +0100 | [diff] [blame] | 4009 | return -EFAULT; |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4010 | log.slot = compat_log.slot; |
| 4011 | log.padding1 = compat_log.padding1; |
| 4012 | log.padding2 = compat_log.padding2; |
| 4013 | log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); |
| 4014 | |
| 4015 | r = kvm_vm_ioctl_get_dirty_log(kvm, &log); |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4016 | break; |
| 4017 | } |
| 4018 | default: |
| 4019 | r = kvm_vm_ioctl(filp, ioctl, arg); |
| 4020 | } |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4021 | return r; |
| 4022 | } |
| 4023 | #endif |
| 4024 | |
Christian Borntraeger | 3d3aab1 | 2008-12-02 11:17:32 +0100 | [diff] [blame] | 4025 | static struct file_operations kvm_vm_fops = { |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4026 | .release = kvm_vm_release, |
| 4027 | .unlocked_ioctl = kvm_vm_ioctl, |
Arnd Bergmann | 6038f37 | 2010-08-15 18:52:59 +0200 | [diff] [blame] | 4028 | .llseek = noop_llseek, |
Marc Zyngier | 7ddfd3e | 2018-06-17 10:16:21 +0100 | [diff] [blame] | 4029 | KVM_COMPAT(kvm_vm_compat_ioctl), |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4030 | }; |
| 4031 | |
Carsten Otte | e08b963 | 2012-01-04 10:25:20 +0100 | [diff] [blame] | 4032 | static int kvm_dev_ioctl_create_vm(unsigned long type) |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4033 | { |
Heiko Carstens | aac8763 | 2010-10-27 17:22:10 +0200 | [diff] [blame] | 4034 | int r; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4035 | struct kvm *kvm; |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4036 | struct file *file; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4037 | |
Carsten Otte | e08b963 | 2012-01-04 10:25:20 +0100 | [diff] [blame] | 4038 | kvm = kvm_create_vm(type); |
Avi Kivity | d6d2816 | 2007-06-28 08:38:16 -0400 | [diff] [blame] | 4039 | if (IS_ERR(kvm)) |
| 4040 | return PTR_ERR(kvm); |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 4041 | #ifdef CONFIG_KVM_MMIO |
Takuya Yoshikawa | 6ce5a09 | 2010-03-15 22:13:30 +0900 | [diff] [blame] | 4042 | r = kvm_coalesced_mmio_init(kvm); |
Markus Elfring | 7858833 | 2017-11-21 13:40:17 +0100 | [diff] [blame] | 4043 | if (r < 0) |
| 4044 | goto put_kvm; |
Takuya Yoshikawa | 6ce5a09 | 2010-03-15 22:13:30 +0900 | [diff] [blame] | 4045 | #endif |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4046 | r = get_unused_fd_flags(O_CLOEXEC); |
Markus Elfring | 7858833 | 2017-11-21 13:40:17 +0100 | [diff] [blame] | 4047 | if (r < 0) |
| 4048 | goto put_kvm; |
| 4049 | |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4050 | file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); |
| 4051 | if (IS_ERR(file)) { |
| 4052 | put_unused_fd(r); |
Markus Elfring | 7858833 | 2017-11-21 13:40:17 +0100 | [diff] [blame] | 4053 | r = PTR_ERR(file); |
| 4054 | goto put_kvm; |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4055 | } |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4056 | |
Paolo Bonzini | 525df86 | 2017-06-27 15:45:09 +0200 | [diff] [blame] | 4057 | /* |
| 4058 | * Don't call kvm_put_kvm anymore at this point; file->f_op is |
| 4059 | * already set, with ->release() being kvm_vm_release(). In error |
| 4060 | * cases it will be called by the final fput(file) and will take |
| 4061 | * care of doing kvm_put_kvm(kvm). |
| 4062 | */ |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4063 | if (kvm_create_vm_debugfs(kvm, r) < 0) { |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4064 | put_unused_fd(r); |
| 4065 | fput(file); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4066 | return -ENOMEM; |
| 4067 | } |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 4068 | kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4069 | |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4070 | fd_install(r, file); |
Heiko Carstens | aac8763 | 2010-10-27 17:22:10 +0200 | [diff] [blame] | 4071 | return r; |
Markus Elfring | 7858833 | 2017-11-21 13:40:17 +0100 | [diff] [blame] | 4072 | |
| 4073 | put_kvm: |
| 4074 | kvm_put_kvm(kvm); |
| 4075 | return r; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4076 | } |
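| |
| | /*
| |  * Illustrative sketch, not part of the original file: the fd installed
| |  * above is what every KVM_VM ioctl in this file operates on:
| |  *
| |  *	int kvm_fd = open("/dev/kvm", O_RDWR);
| |  *	int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, 0);
| |  *
| |  * The 0 argument selects the default machine type for the architecture.
| |  */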
| 4077 | |
| 4078 | static long kvm_dev_ioctl(struct file *filp, |
| 4079 | unsigned int ioctl, unsigned long arg) |
| 4080 | { |
Avi Kivity | 07c45a3 | 2007-03-07 13:05:38 +0200 | [diff] [blame] | 4081 | long r = -EINVAL; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4082 | |
| 4083 | switch (ioctl) { |
| 4084 | case KVM_GET_API_VERSION: |
Avi Kivity | f0fe510 | 2007-03-07 13:11:17 +0200 | [diff] [blame] | 4085 | if (arg) |
| 4086 | goto out; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4087 | r = KVM_API_VERSION; |
| 4088 | break; |
| 4089 | case KVM_CREATE_VM: |
Carsten Otte | e08b963 | 2012-01-04 10:25:20 +0100 | [diff] [blame] | 4090 | r = kvm_dev_ioctl_create_vm(arg); |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4091 | break; |
Zhang Xiantao | 018d00d | 2007-11-15 23:07:47 +0800 | [diff] [blame] | 4092 | case KVM_CHECK_EXTENSION: |
Alexander Graf | 784aa3d | 2014-07-14 18:27:35 +0200 | [diff] [blame] | 4093 | r = kvm_vm_ioctl_check_extension_generic(NULL, arg); |
Avi Kivity | 5d308f4 | 2007-03-01 17:56:20 +0200 | [diff] [blame] | 4094 | break; |
Avi Kivity | 07c45a3 | 2007-03-07 13:05:38 +0200 | [diff] [blame] | 4095 | case KVM_GET_VCPU_MMAP_SIZE: |
Avi Kivity | 07c45a3 | 2007-03-07 13:05:38 +0200 | [diff] [blame] | 4096 | if (arg) |
| 4097 | goto out; |
Avi Kivity | adb1ff4 | 2008-01-24 15:13:08 +0200 | [diff] [blame] | 4098 | r = PAGE_SIZE; /* struct kvm_run */ |
| 4099 | #ifdef CONFIG_X86 |
| 4100 | r += PAGE_SIZE; /* pio data page */ |
| 4101 | #endif |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 4102 | #ifdef CONFIG_KVM_MMIO |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4103 | r += PAGE_SIZE; /* coalesced mmio ring page */ |
| 4104 | #endif |
Avi Kivity | 07c45a3 | 2007-03-07 13:05:38 +0200 | [diff] [blame] | 4105 | break; |
Feng(Eric) Liu | d4c9ff2 | 2008-04-10 08:47:53 -0400 | [diff] [blame] | 4106 | case KVM_TRACE_ENABLE: |
| 4107 | case KVM_TRACE_PAUSE: |
| 4108 | case KVM_TRACE_DISABLE: |
Marcelo Tosatti | 2023a29 | 2009-06-18 11:47:28 -0300 | [diff] [blame] | 4109 | r = -EOPNOTSUPP; |
Feng(Eric) Liu | d4c9ff2 | 2008-04-10 08:47:53 -0400 | [diff] [blame] | 4110 | break; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4111 | default: |
Carsten Otte | 043405e | 2007-10-10 17:16:19 +0200 | [diff] [blame] | 4112 | return kvm_arch_dev_ioctl(filp, ioctl, arg); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4113 | } |
| 4114 | out: |
| 4115 | return r; |
| 4116 | } |
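| |
| | /*
| |  * Illustrative sketch, not part of the original file: a VMM typically
| |  * opens with exactly these system ioctls:
| |  *
| |  *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
| |  *		exit(1);
| |  *	long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
| |  *
| |  * mmap_size is the number of bytes to mmap() on each vCPU fd for its
| |  * struct kvm_run, including the extra pio and coalesced-MMIO pages
| |  * counted above.
| |  */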
| 4117 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4118 | static struct file_operations kvm_chardev_ops = { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4119 | .unlocked_ioctl = kvm_dev_ioctl, |
Arnd Bergmann | 6038f37 | 2010-08-15 18:52:59 +0200 | [diff] [blame] | 4120 | .llseek = noop_llseek, |
Marc Zyngier | 7ddfd3e | 2018-06-17 10:16:21 +0100 | [diff] [blame] | 4121 | KVM_COMPAT(kvm_dev_ioctl), |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4122 | }; |
| 4123 | |
| 4124 | static struct miscdevice kvm_dev = { |
Avi Kivity | bbe4432 | 2007-03-04 13:27:36 +0200 | [diff] [blame] | 4125 | KVM_MINOR, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4126 | "kvm", |
| 4127 | &kvm_chardev_ops, |
| 4128 | }; |
| 4129 | |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4130 | static void hardware_enable_nolock(void *junk) |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4131 | { |
| 4132 | int cpu = raw_smp_processor_id(); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4133 | int r; |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4134 | |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 4135 | if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4136 | return; |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4137 | |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 4138 | cpumask_set_cpu(cpu, cpus_hardware_enabled); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4139 | |
Radim Krčmář | 13a34e0 | 2014-08-28 15:13:03 +0200 | [diff] [blame] | 4140 | r = kvm_arch_hardware_enable(); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4141 | |
| 4142 | if (r) { |
| 4143 | cpumask_clear_cpu(cpu, cpus_hardware_enabled); |
| 4144 | atomic_inc(&hardware_enable_failed); |
Xiubo Li | 1170adc | 2015-02-26 14:58:26 +0800 | [diff] [blame] | 4145 | pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4146 | } |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4147 | } |
| 4148 | |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 4149 | static int kvm_starting_cpu(unsigned int cpu) |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4150 | { |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4151 | raw_spin_lock(&kvm_count_lock); |
Paolo Bonzini | 4fa92fb | 2013-09-10 12:57:17 +0200 | [diff] [blame] | 4152 | if (kvm_usage_count) |
| 4153 | hardware_enable_nolock(NULL); |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4154 | raw_spin_unlock(&kvm_count_lock); |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 4155 | return 0; |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4156 | } |
| 4157 | |
| 4158 | static void hardware_disable_nolock(void *junk) |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4159 | { |
| 4160 | int cpu = raw_smp_processor_id(); |
| 4161 | |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 4162 | if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4163 | return; |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 4164 | cpumask_clear_cpu(cpu, cpus_hardware_enabled); |
Radim Krčmář | 13a34e0 | 2014-08-28 15:13:03 +0200 | [diff] [blame] | 4165 | kvm_arch_hardware_disable(); |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4166 | } |
| 4167 | |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 4168 | static int kvm_dying_cpu(unsigned int cpu) |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4169 | { |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4170 | raw_spin_lock(&kvm_count_lock); |
Paolo Bonzini | 4fa92fb | 2013-09-10 12:57:17 +0200 | [diff] [blame] | 4171 | if (kvm_usage_count) |
| 4172 | hardware_disable_nolock(NULL); |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4173 | raw_spin_unlock(&kvm_count_lock); |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 4174 | return 0; |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4175 | } |
| 4176 | |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4177 | static void hardware_disable_all_nolock(void) |
| 4178 | { |
| 4179 | BUG_ON(!kvm_usage_count); |
| 4180 | |
| 4181 | kvm_usage_count--; |
| 4182 | if (!kvm_usage_count) |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4183 | on_each_cpu(hardware_disable_nolock, NULL, 1); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4184 | } |
| 4185 | |
| 4186 | static void hardware_disable_all(void) |
| 4187 | { |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4188 | raw_spin_lock(&kvm_count_lock); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4189 | hardware_disable_all_nolock(); |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4190 | raw_spin_unlock(&kvm_count_lock); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4191 | } |
| 4192 | |
| 4193 | static int hardware_enable_all(void) |
| 4194 | { |
| 4195 | int r = 0; |
| 4196 | |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4197 | raw_spin_lock(&kvm_count_lock); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4198 | |
| 4199 | kvm_usage_count++; |
| 4200 | if (kvm_usage_count == 1) { |
| 4201 | atomic_set(&hardware_enable_failed, 0); |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4202 | on_each_cpu(hardware_enable_nolock, NULL, 1); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4203 | |
| 4204 | if (atomic_read(&hardware_enable_failed)) { |
| 4205 | hardware_disable_all_nolock(); |
| 4206 | r = -EBUSY; |
| 4207 | } |
| 4208 | } |
| 4209 | |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4210 | raw_spin_unlock(&kvm_count_lock); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4211 | |
| 4212 | return r; |
| 4213 | } |
| 4214 | |
Rusty Russell | 9a2b85c | 2007-07-17 23:17:55 +1000 | [diff] [blame] | 4215 | static int kvm_reboot(struct notifier_block *notifier, unsigned long val, |
Mike Day | d77c26f | 2007-10-08 09:02:08 -0400 | [diff] [blame] | 4216 | void *v) |
Rusty Russell | 9a2b85c | 2007-07-17 23:17:55 +1000 | [diff] [blame] | 4217 | { |
Sheng Yang | 8e1c181 | 2009-04-29 11:09:04 +0800 | [diff] [blame] | 4218 | /* |
| 4219 | * Some (well, at least mine) BIOSes hang on reboot if they are
| 4220 | * in VMX root mode.
| 4221 | *
| 4222 | * And Intel TXT requires VMX to be off on all CPUs when the system shuts down.
| 4223 | */ |
Xiubo Li | 1170adc | 2015-02-26 14:58:26 +0800 | [diff] [blame] | 4224 | pr_info("kvm: exiting hardware virtualization\n"); |
Sheng Yang | 8e1c181 | 2009-04-29 11:09:04 +0800 | [diff] [blame] | 4225 | kvm_rebooting = true; |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4226 | on_each_cpu(hardware_disable_nolock, NULL, 1); |
Rusty Russell | 9a2b85c | 2007-07-17 23:17:55 +1000 | [diff] [blame] | 4227 | return NOTIFY_OK; |
| 4228 | } |
| 4229 | |
| 4230 | static struct notifier_block kvm_reboot_notifier = { |
| 4231 | .notifier_call = kvm_reboot, |
| 4232 | .priority = 0, |
| 4233 | }; |
| 4234 | |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4235 | static void kvm_io_bus_destroy(struct kvm_io_bus *bus) |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 4236 | { |
| 4237 | int i; |
| 4238 | |
| 4239 | for (i = 0; i < bus->dev_count; i++) { |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4240 | struct kvm_io_device *pos = bus->range[i].dev; |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 4241 | |
| 4242 | kvm_iodevice_destructor(pos); |
| 4243 | } |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4244 | kfree(bus); |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 4245 | } |
| 4246 | |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 4247 | static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, |
Xiubo Li | 20e87b7 | 2015-02-26 14:58:25 +0800 | [diff] [blame] | 4248 | const struct kvm_io_range *r2) |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4249 | { |
Jason Wang | 8f4216c7 | 2015-09-15 14:41:57 +0800 | [diff] [blame] | 4250 | gpa_t addr1 = r1->addr; |
| 4251 | gpa_t addr2 = r2->addr; |
| 4252 | |
| 4253 | if (addr1 < addr2) |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4254 | return -1; |
Jason Wang | 8f4216c7 | 2015-09-15 14:41:57 +0800 | [diff] [blame] | 4255 | |
| 4256 | /* If r2->len == 0, match the exact address. If r2->len != 0, |
| 4257 | * accept any overlapping write. Any order is acceptable for |
| 4258 | * overlapping ranges, because kvm_io_bus_get_first_dev ensures |
| 4259 | * we process all of them. |
| 4260 | */ |
| 4261 | if (r2->len) { |
| 4262 | addr1 += r1->len; |
| 4263 | addr2 += r2->len; |
| 4264 | } |
| 4265 | |
| 4266 | if (addr1 > addr2) |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4267 | return 1; |
Jason Wang | 8f4216c7 | 2015-09-15 14:41:57 +0800 | [diff] [blame] | 4268 | |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4269 | return 0; |
| 4270 | } |
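| |
| | /*
| |  * Worked example of the comparison above: for a registered range
| |  * r2 = { .addr = 0x100, .len = 16 } and an access r1 = { .addr = 0x104,
| |  * .len = 4 }, the start check does not fire (0x104 >= 0x100) and the
| |  * end check compares 0x108 against 0x110, so the result is 0: any
| |  * access fully contained in a device's range matches. A zero-length
| |  * registered range matches on its exact start address only.
| |  */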
| 4271 | |
Paolo Bonzini | a343c9b | 2013-07-16 13:03:29 +0200 | [diff] [blame] | 4272 | static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) |
| 4273 | { |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 4274 | return kvm_io_bus_cmp(p1, p2); |
Paolo Bonzini | a343c9b | 2013-07-16 13:03:29 +0200 | [diff] [blame] | 4275 | } |
| 4276 | |
Geoff Levand | 39369f7 | 2013-04-05 19:20:30 +0000 | [diff] [blame] | 4277 | static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4278 | gpa_t addr, int len) |
| 4279 | { |
| 4280 | struct kvm_io_range *range, key; |
| 4281 | int off; |
| 4282 | |
| 4283 | key = (struct kvm_io_range) { |
| 4284 | .addr = addr, |
| 4285 | .len = len, |
| 4286 | }; |
| 4287 | |
| 4288 | range = bsearch(&key, bus->range, bus->dev_count, |
| 4289 | sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); |
| 4290 | if (range == NULL) |
| 4291 | return -ENOENT; |
| 4292 | |
| 4293 | off = range - bus->range; |
| 4294 | |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 4295 | while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4296 | off--; |
| 4297 | |
| 4298 | return off; |
| 4299 | } |
| 4300 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4301 | static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4302 | struct kvm_io_range *range, const void *val) |
| 4303 | { |
| 4304 | int idx; |
| 4305 | |
| 4306 | idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); |
| 4307 | if (idx < 0) |
| 4308 | return -EOPNOTSUPP; |
| 4309 | |
| 4310 | while (idx < bus->dev_count && |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 4311 | kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4312 | if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4313 | range->len, val)) |
| 4314 | return idx; |
| 4315 | idx++; |
| 4316 | } |
| 4317 | |
| 4318 | return -EOPNOTSUPP; |
| 4319 | } |
| 4320 | |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 4321 | /* kvm_io_bus_write - called under kvm->slots_lock */ |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4322 | int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 4323 | int len, const void *val) |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 4324 | { |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4325 | struct kvm_io_bus *bus; |
| 4326 | struct kvm_io_range range; |
| 4327 | int r; |
| 4328 | |
| 4329 | range = (struct kvm_io_range) { |
| 4330 | .addr = addr, |
| 4331 | .len = len, |
| 4332 | }; |
| 4333 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4334 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4335 | if (!bus) |
| 4336 | return -ENOMEM; |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4337 | r = __kvm_io_bus_write(vcpu, bus, &range, val); |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4338 | return r < 0 ? r : 0; |
| 4339 | } |
Leo Yan | a242010 | 2019-02-22 16:10:09 +0800 | [diff] [blame] | 4340 | EXPORT_SYMBOL_GPL(kvm_io_bus_write); |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4341 | |
| 4342 | /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4343 | int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, |
| 4344 | gpa_t addr, int len, const void *val, long cookie) |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4345 | { |
Lai Jiangshan | 90d83dc | 2010-04-19 17:41:23 +0800 | [diff] [blame] | 4346 | struct kvm_io_bus *bus; |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4347 | struct kvm_io_range range; |
| 4348 | |
| 4349 | range = (struct kvm_io_range) { |
| 4350 | .addr = addr, |
| 4351 | .len = len, |
| 4352 | }; |
Lai Jiangshan | 90d83dc | 2010-04-19 17:41:23 +0800 | [diff] [blame] | 4353 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4354 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4355 | if (!bus) |
| 4356 | return -ENOMEM; |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4357 | |
| 4358 | /* First try the device referenced by cookie. */ |
| 4359 | if ((cookie >= 0) && (cookie < bus->dev_count) && |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 4360 | (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4361 | if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4362 | val)) |
| 4363 | return cookie; |
| 4364 | |
| 4365 | /* |
| 4366 | * cookie contained garbage; fall back to search and return the |
| 4367 | * correct cookie value. |
| 4368 | */ |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4369 | return __kvm_io_bus_write(vcpu, bus, &range, val); |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4370 | } |
| 4371 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4372 | static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, |
| 4373 | struct kvm_io_range *range, void *val) |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4374 | { |
| 4375 | int idx; |
| 4376 | |
| 4377 | idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4378 | if (idx < 0) |
| 4379 | return -EOPNOTSUPP; |
| 4380 | |
| 4381 | while (idx < bus->dev_count && |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 4382 | kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4383 | if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4384 | range->len, val)) |
| 4385 | return idx; |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4386 | idx++; |
| 4387 | } |
| 4388 | |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 4389 | return -EOPNOTSUPP; |
| 4390 | } |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 4391 | |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 4392 | /* kvm_io_bus_read - called under kvm->slots_lock */ |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4393 | int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4394 | int len, void *val) |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 4395 | { |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4396 | struct kvm_io_bus *bus; |
| 4397 | struct kvm_io_range range; |
| 4398 | int r; |
| 4399 | |
| 4400 | range = (struct kvm_io_range) { |
| 4401 | .addr = addr, |
| 4402 | .len = len, |
| 4403 | }; |
| 4404 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4405 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4406 | if (!bus) |
| 4407 | return -ENOMEM; |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4408 | r = __kvm_io_bus_read(vcpu, bus, &range, val); |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4409 | return r < 0 ? r : 0; |
| 4410 | } |
| 4411 | |
Marcelo Tosatti | 79fac95 | 2009-12-23 14:35:26 -0200 | [diff] [blame] | 4412 | /* Caller must hold slots_lock. */ |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4413 | int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, |
| 4414 | int len, struct kvm_io_device *dev) |
Michael S. Tsirkin | 6c47469 | 2009-06-29 22:24:26 +0300 | [diff] [blame] | 4415 | { |
Gal Hammer | d4c67a7 | 2018-01-16 15:34:41 +0200 | [diff] [blame] | 4416 | int i; |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4417 | struct kvm_io_bus *new_bus, *bus; |
Gal Hammer | d4c67a7 | 2018-01-16 15:34:41 +0200 | [diff] [blame] | 4418 | struct kvm_io_range range; |
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 4419 | |
Christian Borntraeger | 4a12f95 | 2017-07-07 10:51:38 +0200 | [diff] [blame] | 4420 | bus = kvm_get_bus(kvm, bus_idx); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4421 | if (!bus) |
| 4422 | return -ENOMEM; |
| 4423 | |
Amos Kong | 6ea34c9 | 2013-05-25 06:44:15 +0800 | [diff] [blame] | 4424 | /* exclude ioeventfds, which are already limited by the maximum number of fds */
| 4425 | if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) |
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 4426 | return -ENOSPC; |
| 4427 | |
Gustavo A. R. Silva | 90952cd | 2019-01-30 17:07:47 +0100 | [diff] [blame] | 4428 | new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1), |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 4429 | GFP_KERNEL_ACCOUNT); |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4430 | if (!new_bus) |
| 4431 | return -ENOMEM; |
Gal Hammer | d4c67a7 | 2018-01-16 15:34:41 +0200 | [diff] [blame] | 4432 | |
| 4433 | range = (struct kvm_io_range) { |
| 4434 | .addr = addr, |
| 4435 | .len = len, |
| 4436 | .dev = dev, |
| 4437 | }; |
| 4438 | |
| 4439 | for (i = 0; i < bus->dev_count; i++) |
| 4440 | if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) |
| 4441 | break; |
| 4442 | |
| 4443 | memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); |
| 4444 | new_bus->dev_count++; |
| 4445 | new_bus->range[i] = range; |
| 4446 | memcpy(new_bus->range + i + 1, bus->range + i, |
| 4447 | (bus->dev_count - i) * sizeof(struct kvm_io_range)); |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4448 | rcu_assign_pointer(kvm->buses[bus_idx], new_bus); |
| 4449 | synchronize_srcu_expedited(&kvm->srcu); |
| 4450 | kfree(bus); |
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 4451 | |
| 4452 | return 0; |
| 4453 | } |
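| |
| | /*
| |  * Note on the copy-and-swap above: readers walk bus->range under SRCU,
| |  * so the array is never resized in place. A new copy with the range
| |  * inserted at its sorted position is published with
| |  * rcu_assign_pointer(), and the old copy is freed only after
| |  * synchronize_srcu_expedited() guarantees no reader still holds it.
| |  */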
| 4454 | |
Marcelo Tosatti | 79fac95 | 2009-12-23 14:35:26 -0200 | [diff] [blame] | 4455 | /* Caller must hold slots_lock. */ |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4456 | void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
| 4457 | struct kvm_io_device *dev) |
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 4458 | { |
Rustam Kovhaev | f658866 | 2020-09-07 11:55:35 -0700 | [diff] [blame] | 4459 | int i, j; |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4460 | struct kvm_io_bus *new_bus, *bus; |
Michael S. Tsirkin | 6c47469 | 2009-06-29 22:24:26 +0300 | [diff] [blame] | 4461 | |
Christian Borntraeger | 4a12f95 | 2017-07-07 10:51:38 +0200 | [diff] [blame] | 4462 | bus = kvm_get_bus(kvm, bus_idx); |
Peter Xu | df630b8 | 2017-03-15 16:01:17 +0800 | [diff] [blame] | 4463 | if (!bus) |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4464 | return; |
Peter Xu | df630b8 | 2017-03-15 16:01:17 +0800 | [diff] [blame] | 4465 | |
Amos Kong | a1300716 | 2012-03-09 12:17:32 +0800 | [diff] [blame] | 4466 | for (i = 0; i < bus->dev_count; i++)
| 4467 | if (bus->range[i].dev == dev)
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 4468 | break;
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4470 | |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4471 | if (i == bus->dev_count) |
| 4472 | return; |
Amos Kong | a1300716 | 2012-03-09 12:17:32 +0800 | [diff] [blame] | 4473 | |
Gustavo A. R. Silva | 90952cd | 2019-01-30 17:07:47 +0100 | [diff] [blame] | 4474 | new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 4475 | GFP_KERNEL_ACCOUNT); |
Rustam Kovhaev | f658866 | 2020-09-07 11:55:35 -0700 | [diff] [blame] | 4476 | if (new_bus) { |
Rustam Kovhaev | 871c433 | 2020-09-18 05:05:00 -0700 | [diff] [blame] | 4477 | memcpy(new_bus, bus, struct_size(bus, range, i)); |
Rustam Kovhaev | f658866 | 2020-09-07 11:55:35 -0700 | [diff] [blame] | 4478 | new_bus->dev_count--; |
| 4479 | memcpy(new_bus->range + i, bus->range + i + 1, |
Rustam Kovhaev | 871c433 | 2020-09-18 05:05:00 -0700 | [diff] [blame] | 4480 | flex_array_size(new_bus, range, new_bus->dev_count - i)); |
Rustam Kovhaev | f658866 | 2020-09-07 11:55:35 -0700 | [diff] [blame] | 4481 | } else { |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4482 | pr_err("kvm: failed to shrink bus, removing it completely\n"); |
Rustam Kovhaev | f658866 | 2020-09-07 11:55:35 -0700 | [diff] [blame] | 4483 | for (j = 0; j < bus->dev_count; j++) { |
| 4484 | if (j == i) |
| 4485 | continue; |
| 4486 | kvm_iodevice_destructor(bus->range[j].dev); |
| 4487 | } |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4488 | } |
Amos Kong | a1300716 | 2012-03-09 12:17:32 +0800 | [diff] [blame] | 4489 | |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4490 | rcu_assign_pointer(kvm->buses[bus_idx], new_bus); |
| 4491 | synchronize_srcu_expedited(&kvm->srcu); |
| 4492 | kfree(bus); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4493 | return; |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 4494 | } |
| 4495 | |
Andre Przywara | 8a39d00 | 2016-07-15 12:43:26 +0100 | [diff] [blame] | 4496 | struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
| 4497 | gpa_t addr) |
| 4498 | { |
| 4499 | struct kvm_io_bus *bus; |
| 4500 | int dev_idx, srcu_idx; |
| 4501 | struct kvm_io_device *iodev = NULL; |
| 4502 | |
| 4503 | srcu_idx = srcu_read_lock(&kvm->srcu); |
| 4504 | |
| 4505 | bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4506 | if (!bus) |
| 4507 | goto out_unlock; |
Andre Przywara | 8a39d00 | 2016-07-15 12:43:26 +0100 | [diff] [blame] | 4508 | |
| 4509 | dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); |
| 4510 | if (dev_idx < 0) |
| 4511 | goto out_unlock; |
| 4512 | |
| 4513 | iodev = bus->range[dev_idx].dev; |
| 4514 | |
| 4515 | out_unlock: |
| 4516 | srcu_read_unlock(&kvm->srcu, srcu_idx); |
| 4517 | |
| 4518 | return iodev; |
| 4519 | } |
| 4520 | EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); |
| 4521 | |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4522 | static int kvm_debugfs_open(struct inode *inode, struct file *file, |
| 4523 | int (*get)(void *, u64 *), int (*set)(void *, u64), |
| 4524 | const char *fmt) |
| 4525 | { |
| 4526 | struct kvm_stat_data *stat_data = (struct kvm_stat_data *) |
| 4527 | inode->i_private; |
| 4528 | |
| 4529 | /* The debugfs files hold a reference to the kvm struct, which
| 4530 | * is still valid when kvm_destroy_vm is called.
| 4531 | * To avoid a race between open and the removal of the debugfs
| 4532 | * directory, we test against the users count.
| 4533 | */ |
Elena Reshetova | e3736c3 | 2017-02-20 13:06:21 +0200 | [diff] [blame] | 4534 | if (!refcount_inc_not_zero(&stat_data->kvm->users_count)) |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4535 | return -ENOENT; |
| 4536 | |
Paolo Bonzini | 833b45d | 2019-09-30 18:48:44 +0200 | [diff] [blame] | 4537 | if (simple_attr_open(inode, file, get, |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4538 | KVM_DBGFS_GET_MODE(stat_data->dbgfs_item) & 0222 |
| 4539 | ? set : NULL, |
| 4540 | fmt)) { |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4541 | kvm_put_kvm(stat_data->kvm); |
| 4542 | return -ENOMEM; |
| 4543 | } |
| 4544 | |
| 4545 | return 0; |
| 4546 | } |
| 4547 | |
| 4548 | static int kvm_debugfs_release(struct inode *inode, struct file *file) |
| 4549 | { |
| 4550 | struct kvm_stat_data *stat_data = (struct kvm_stat_data *) |
| 4551 | inode->i_private; |
| 4552 | |
| 4553 | simple_attr_release(inode, file); |
| 4554 | kvm_put_kvm(stat_data->kvm); |
| 4555 | |
| 4556 | return 0; |
| 4557 | } |
| 4558 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4559 | static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4560 | { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4561 | *val = *(ulong *)((void *)kvm + offset); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4562 | |
| 4563 | return 0; |
| 4564 | } |
| 4565 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4566 | static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4567 | { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4568 | *(ulong *)((void *)kvm + offset) = 0; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4569 | |
| 4570 | return 0; |
| 4571 | } |
| 4572 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4573 | static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4574 | { |
| 4575 | int i; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4576 | struct kvm_vcpu *vcpu; |
| 4577 | |
| 4578 | *val = 0; |
| 4579 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4580 | kvm_for_each_vcpu(i, vcpu, kvm) |
| 4581 | *val += *(u64 *)((void *)vcpu + offset); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4582 | |
| 4583 | return 0; |
| 4584 | } |
| 4585 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4586 | static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4587 | { |
| 4588 | int i; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4589 | struct kvm_vcpu *vcpu; |
| 4590 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4591 | kvm_for_each_vcpu(i, vcpu, kvm) |
| 4592 | *(u64 *)((void *)vcpu + offset) = 0; |
| 4593 | |
| 4594 | return 0; |
| 4595 | } |
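 | | 
 | | /*
 | | * In the four helpers above, "offset" is the byte offset of a counter
 | | * inside struct kvm or struct kvm_vcpu, as recorded (via offsetof())
 | | * in the arch's debugfs_entries[] table, hence the (void *) + offset
 | | * pointer arithmetic.
 | | */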
| 4596 | |
| 4597 | static int kvm_stat_data_get(void *data, u64 *val) |
| 4598 | { |
| 4599 | int r = -EFAULT; |
| 4600 | struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; |
| 4601 | |
| 4602 | switch (stat_data->dbgfs_item->kind) { |
| 4603 | case KVM_STAT_VM: |
| 4604 | r = kvm_get_stat_per_vm(stat_data->kvm, |
| 4605 | stat_data->dbgfs_item->offset, val); |
| 4606 | break; |
| 4607 | case KVM_STAT_VCPU: |
| 4608 | r = kvm_get_stat_per_vcpu(stat_data->kvm, |
| 4609 | stat_data->dbgfs_item->offset, val); |
| 4610 | break; |
| 4611 | } |
| 4612 | |
| 4613 | return r; |
| 4614 | } |
| 4615 | |
| 4616 | static int kvm_stat_data_clear(void *data, u64 val) |
| 4617 | { |
| 4618 | int r = -EFAULT; |
| 4619 | struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; |
| 4620 | |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4621 | if (val) |
| 4622 | return -EINVAL; |
| 4623 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4624 | switch (stat_data->dbgfs_item->kind) { |
| 4625 | case KVM_STAT_VM: |
| 4626 | r = kvm_clear_stat_per_vm(stat_data->kvm, |
| 4627 | stat_data->dbgfs_item->offset); |
| 4628 | break; |
| 4629 | case KVM_STAT_VCPU: |
| 4630 | r = kvm_clear_stat_per_vcpu(stat_data->kvm, |
| 4631 | stat_data->dbgfs_item->offset); |
| 4632 | break; |
| 4633 | } |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4634 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4635 | return r; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4636 | } |
| 4637 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4638 | static int kvm_stat_data_open(struct inode *inode, struct file *file) |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4639 | { |
| 4640 | __simple_attr_check_format("%llu\n", 0ull); |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4641 | return kvm_debugfs_open(inode, file, kvm_stat_data_get, |
| 4642 | kvm_stat_data_clear, "%llu\n"); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4643 | } |
| 4644 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4645 | static const struct file_operations stat_fops_per_vm = { |
| 4646 | .owner = THIS_MODULE, |
| 4647 | .open = kvm_stat_data_open, |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4648 | .release = kvm_debugfs_release, |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4649 | .read = simple_attr_read, |
| 4650 | .write = simple_attr_write, |
| 4651 | .llseek = no_llseek, |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4652 | }; |
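 | | 
 | | /*
 | | * These per-VM fops are handed to debugfs when a VM's directory is
 | | * created (kvm_create_vm_debugfs(), not part of this excerpt); the
 | | * wiring is roughly:
 | | *
 | | *	stat_data->kvm = kvm;
 | | *	stat_data->dbgfs_item = p;
 | | *	debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
 | | *			    kvm->debugfs_dentry, stat_data,
 | | *			    &stat_fops_per_vm);
 | | *
 | | * which is how inode->i_private comes to point at a kvm_stat_data
 | | * in kvm_debugfs_open() above.
 | | */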
| 4653 | |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 4654 | static int vm_stat_get(void *_offset, u64 *val) |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 4655 | { |
| 4656 | unsigned offset = (long)_offset; |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 4657 | struct kvm *kvm; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4658 | u64 tmp_val; |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 4659 | |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 4660 | *val = 0; |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 4661 | mutex_lock(&kvm_lock); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4662 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4663 | kvm_get_stat_per_vm(kvm, offset, &tmp_val); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4664 | *val += tmp_val; |
| 4665 | } |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 4666 | mutex_unlock(&kvm_lock); |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 4667 | return 0; |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 4668 | } |
| 4669 | |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4670 | static int vm_stat_clear(void *_offset, u64 val) |
| 4671 | { |
| 4672 | unsigned offset = (long)_offset; |
| 4673 | struct kvm *kvm; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4674 | |
| 4675 | if (val) |
| 4676 | return -EINVAL; |
| 4677 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 4678 | mutex_lock(&kvm_lock); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4679 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4680 | kvm_clear_stat_per_vm(kvm, offset); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4681 | } |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 4682 | mutex_unlock(&kvm_lock); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4683 | |
| 4684 | return 0; |
| 4685 | } |
| 4686 | |
| 4687 | DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); |
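 | | 
 | | /*
 | | * DEFINE_SIMPLE_ATTRIBUTE() (<linux/fs.h>) generates the
 | | * open/read/write/llseek boilerplate around the get/set callbacks, so
 | | * a read of the global VM stat file lands in vm_stat_get() and a
 | | * write of "0" in vm_stat_clear(), both using the "%llu\n" format.
 | | */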
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 4688 | |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 4689 | static int vcpu_stat_get(void *_offset, u64 *val) |
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 4690 | { |
| 4691 | unsigned offset = (long)_offset; |
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 4692 | struct kvm *kvm; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4693 | u64 tmp_val; |
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 4694 | |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 4695 | *val = 0; |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 4696 | mutex_lock(&kvm_lock); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4697 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4698 | kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4699 | *val += tmp_val; |
| 4700 | } |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 4701 | mutex_unlock(&kvm_lock); |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 4702 | return 0; |
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 4703 | } |
| 4704 | |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4705 | static int vcpu_stat_clear(void *_offset, u64 val) |
| 4706 | { |
| 4707 | unsigned offset = (long)_offset; |
| 4708 | struct kvm *kvm; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4709 | |
| 4710 | if (val) |
| 4711 | return -EINVAL; |
| 4712 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 4713 | mutex_lock(&kvm_lock); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4714 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4715 | kvm_clear_stat_per_vcpu(kvm, offset); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4716 | } |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 4717 | mutex_unlock(&kvm_lock); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 4718 | |
| 4719 | return 0; |
| 4720 | } |
| 4721 | |
| 4722 | DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, |
| 4723 | "%llu\n"); |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 4724 | |
Alexey Dobriyan | 828c095 | 2009-10-01 15:43:56 -0700 | [diff] [blame] | 4725 | static const struct file_operations *stat_fops[] = { |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 4726 | [KVM_STAT_VCPU] = &vcpu_stat_fops, |
| 4727 | [KVM_STAT_VM] = &vm_stat_fops, |
| 4728 | }; |
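 | | 
 | | /*
 | | * kvm_init_debug() below indexes stat_fops[] by each entry's kind:
 | | * KVM_STAT_VM files aggregate a counter across all VMs, KVM_STAT_VCPU
 | | * files across all vCPUs of all VMs.
 | | */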
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 4729 | |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 4730 | static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) |
| 4731 | { |
| 4732 | struct kobj_uevent_env *env; |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 4733 | unsigned long long created, active; |
| 4734 | |
| 4735 | if (!kvm_dev.this_device || !kvm) |
| 4736 | return; |
| 4737 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 4738 | mutex_lock(&kvm_lock); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 4739 | if (type == KVM_EVENT_CREATE_VM) { |
| 4740 | kvm_createvm_count++; |
| 4741 | kvm_active_vms++; |
| 4742 | } else if (type == KVM_EVENT_DESTROY_VM) { |
| 4743 | kvm_active_vms--; |
| 4744 | } |
| 4745 | created = kvm_createvm_count; |
| 4746 | active = kvm_active_vms; |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 4747 | mutex_unlock(&kvm_lock); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 4748 | |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 4749 | env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 4750 | if (!env) |
| 4751 | return; |
| 4752 | |
| 4753 | add_uevent_var(env, "CREATED=%llu", created); |
| 4754 | add_uevent_var(env, "COUNT=%llu", active); |
| 4755 | |
Claudio Imbrenda | fdeaf7e | 2017-07-24 13:40:03 +0200 | [diff] [blame] | 4756 | if (type == KVM_EVENT_CREATE_VM) { |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 4757 | add_uevent_var(env, "EVENT=create"); |
Claudio Imbrenda | fdeaf7e | 2017-07-24 13:40:03 +0200 | [diff] [blame] | 4758 | kvm->userspace_pid = task_pid_nr(current); |
| 4759 | } else if (type == KVM_EVENT_DESTROY_VM) { |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 4760 | add_uevent_var(env, "EVENT=destroy"); |
Claudio Imbrenda | fdeaf7e | 2017-07-24 13:40:03 +0200 | [diff] [blame] | 4761 | } |
| 4762 | add_uevent_var(env, "PID=%d", kvm->userspace_pid); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 4763 | |
Greg Kroah-Hartman | 8ed0579 | 2019-02-28 16:34:37 +0100 | [diff] [blame] | 4764 | if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) { |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 4765 | char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 4766 | |
Claudio Imbrenda | fdeaf7e | 2017-07-24 13:40:03 +0200 | [diff] [blame] | 4767 | if (p) { |
| 4768 | tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); |
| 4769 | if (!IS_ERR(tmp)) |
| 4770 | add_uevent_var(env, "STATS_PATH=%s", tmp); |
| 4771 | kfree(p); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 4772 | } |
| 4773 | } |
 | 4774 | /* no need for checks, since we are adding at most 5 keys */
| 4775 | env->envp[env->envp_idx++] = NULL; |
| 4776 | kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); |
| 4777 | kfree(env); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 4778 | } |
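 | | 
 | | /*
 | | * An example of the resulting uevent environment as seen by userspace
 | | * (values illustrative):
 | | *
 | | *	CREATED=42
 | | *	COUNT=3
 | | *	EVENT=create
 | | *	PID=1234
 | | *	STATS_PATH=/kvm/1234-11
 | | */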
| 4779 | |
Greg Kroah-Hartman | 929f45e | 2018-05-29 18:22:04 +0200 | [diff] [blame] | 4780 | static void kvm_init_debug(void) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4781 | { |
| 4782 | struct kvm_stats_debugfs_item *p; |
| 4783 | |
Hollis Blanchard | 76f7c87 | 2008-04-15 16:05:42 -0500 | [diff] [blame] | 4784 | kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); |
Hamo | 4f69b68 | 2011-12-15 14:23:16 +0800 | [diff] [blame] | 4785 | |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4786 | kvm_debugfs_num_entries = 0; |
| 4787 | for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 4788 | debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p), |
| 4789 | kvm_debugfs_dir, (void *)(long)p->offset, |
Greg Kroah-Hartman | 929f45e | 2018-05-29 18:22:04 +0200 | [diff] [blame] | 4790 | stat_fops[p->kind]); |
Hamo | 4f69b68 | 2011-12-15 14:23:16 +0800 | [diff] [blame] | 4791 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4792 | } |
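 | | 
 | | /*
 | | * With debugfs mounted in its usual place, the result is one file per
 | | * stat under /sys/kernel/debug/kvm/, e.g. /sys/kernel/debug/kvm/exits
 | | * (entry names come from the arch-specific debugfs_entries[] table).
 | | */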
| 4793 | |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 4794 | static int kvm_suspend(void) |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 4795 | { |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4796 | if (kvm_usage_count) |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4797 | hardware_disable_nolock(NULL); |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 4798 | return 0; |
| 4799 | } |
| 4800 | |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 4801 | static void kvm_resume(void) |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 4802 | { |
Zachary Amsden | ca84d1a | 2010-08-19 22:07:28 -1000 | [diff] [blame] | 4803 | if (kvm_usage_count) { |
Wanpeng Li | 2eb06c3 | 2019-05-17 16:49:49 +0800 | [diff] [blame] | 4804 | #ifdef CONFIG_LOCKDEP |
| 4805 | WARN_ON(lockdep_is_held(&kvm_count_lock)); |
| 4806 | #endif |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4807 | hardware_enable_nolock(NULL); |
Zachary Amsden | ca84d1a | 2010-08-19 22:07:28 -1000 | [diff] [blame] | 4808 | } |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 4809 | } |
| 4810 | |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 4811 | static struct syscore_ops kvm_syscore_ops = { |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 4812 | .suspend = kvm_suspend, |
| 4813 | .resume = kvm_resume, |
| 4814 | }; |
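 | | 
 | | /*
 | | * Syscore callbacks run on a single CPU with interrupts disabled, late
 | | * in suspend and early in resume, which is why the _nolock variants of
 | | * hardware enable/disable can be called here without kvm_count_lock.
 | | */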
| 4815 | |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 4816 | static inline |
| 4817 | struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) |
| 4818 | { |
| 4819 | return container_of(pn, struct kvm_vcpu, preempt_notifier); |
| 4820 | } |
| 4821 | |
| 4822 | static void kvm_sched_in(struct preempt_notifier *pn, int cpu) |
| 4823 | { |
| 4824 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); |
Xiubo Li | f95ef0cd | 2015-02-26 14:58:23 +0800 | [diff] [blame] | 4825 | |
Wanpeng Li | 046ddee | 2019-08-01 11:30:14 +0800 | [diff] [blame] | 4826 | WRITE_ONCE(vcpu->preempted, false); |
Wanpeng Li | d73eb57 | 2019-07-18 19:39:06 +0800 | [diff] [blame] | 4827 | WRITE_ONCE(vcpu->ready, false); |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 4828 | |
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 4829 | __this_cpu_write(kvm_running_vcpu, vcpu); |
Radim Krčmář | e790d9e | 2014-08-21 18:08:05 +0200 | [diff] [blame] | 4830 | kvm_arch_sched_in(vcpu, cpu); |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 4831 | kvm_arch_vcpu_load(vcpu, cpu); |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 4832 | } |
| 4833 | |
| 4834 | static void kvm_sched_out(struct preempt_notifier *pn, |
| 4835 | struct task_struct *next) |
| 4836 | { |
| 4837 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); |
| 4838 | |
Wanpeng Li | d73eb57 | 2019-07-18 19:39:06 +0800 | [diff] [blame] | 4839 | if (current->state == TASK_RUNNING) { |
Wanpeng Li | 046ddee | 2019-08-01 11:30:14 +0800 | [diff] [blame] | 4840 | WRITE_ONCE(vcpu->preempted, true); |
Wanpeng Li | d73eb57 | 2019-07-18 19:39:06 +0800 | [diff] [blame] | 4841 | WRITE_ONCE(vcpu->ready, true); |
| 4842 | } |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 4843 | kvm_arch_vcpu_put(vcpu); |
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 4844 | __this_cpu_write(kvm_running_vcpu, NULL); |
| 4845 | } |
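 | | 
 | | /*
 | | * The two handlers above are installed into kvm_preempt_ops in
 | | * kvm_init() below and fire on every context switch of a vCPU task;
 | | * that is what keeps the kvm_running_vcpu per-CPU pointer coherent.
 | | */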
| 4846 | |
| 4847 | /** |
| 4848 | * kvm_get_running_vcpu - get the vcpu running on the current CPU. |
Marc Zyngier | 1f03b2b | 2020-02-07 16:34:10 +0000 | [diff] [blame] | 4849 | * |
 | 4850 | * It is safe to disable preemption only around the access to the per-CPU
 | 4851 | * variable and to use the resolved vcpu pointer after re-enabling
 | 4852 | * preemption: even if the current thread migrates to another CPU in the
 | 4853 | * meantime, a later read of the per-CPU variable yields the same value,
 | 4854 | * because the preempt notifier handlers update it on every switch.
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 4855 | */ |
| 4856 | struct kvm_vcpu *kvm_get_running_vcpu(void) |
| 4857 | { |
Marc Zyngier | 1f03b2b | 2020-02-07 16:34:10 +0000 | [diff] [blame] | 4858 | struct kvm_vcpu *vcpu; |
| 4859 | |
| 4860 | preempt_disable(); |
| 4861 | vcpu = __this_cpu_read(kvm_running_vcpu); |
| 4862 | preempt_enable(); |
| 4863 | |
| 4864 | return vcpu; |
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 4865 | } |
Wanpeng Li | 379a3c8 | 2020-04-28 14:23:27 +0800 | [diff] [blame] | 4866 | EXPORT_SYMBOL_GPL(kvm_get_running_vcpu); |
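 | | 
 | | /*
 | | * Illustrative use, from a context that is not handed a vcpu pointer
 | | * (e.g. an interrupt path that wants to know whether a vCPU is live
 | | * on this CPU); "target_id" is a hypothetical local:
 | | *
 | | *	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
 | | *
 | | *	if (vcpu && vcpu->vcpu_id == target_id)
 | | *		...fast path on the local CPU...
 | | */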
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 4867 | |
| 4868 | /** |
| 4869 | * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus. |
| 4870 | */ |
| 4871 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) |
| 4872 | { |
| 4873 | return &kvm_running_vcpu; |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 4874 | } |
| 4875 | |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 4876 | struct kvm_cpu_compat_check { |
| 4877 | void *opaque; |
| 4878 | int *ret; |
| 4879 | }; |
| 4880 | |
| 4881 | static void check_processor_compat(void *data) |
Sean Christopherson | f257d6d | 2019-04-19 22:18:17 -0700 | [diff] [blame] | 4882 | { |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 4883 | struct kvm_cpu_compat_check *c = data; |
| 4884 | |
| 4885 | *c->ret = kvm_arch_check_processor_compat(c->opaque); |
Sean Christopherson | f257d6d | 2019-04-19 22:18:17 -0700 | [diff] [blame] | 4886 | } |
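 | | 
 | | /*
 | | * check_processor_compat() is invoked on every online CPU via
 | | * smp_call_function_single() in kvm_init() below; the wrapper struct
 | | * exists only because IPI callbacks receive a single void * argument.
 | | */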
| 4887 | |
Avi Kivity | 0ee75be | 2010-04-28 15:39:01 +0300 | [diff] [blame] | 4888 | int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 4889 | struct module *module) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4890 | { |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 4891 | struct kvm_cpu_compat_check c; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4892 | int r; |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 4893 | int cpu; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4894 | |
Zhang Xiantao | f8c16bb | 2007-11-14 20:40:21 +0800 | [diff] [blame] | 4895 | r = kvm_arch_init(opaque); |
| 4896 | if (r) |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 4897 | goto out_fail; |
Zhang Xiantao | cb498ea | 2007-11-14 20:39:31 +0800 | [diff] [blame] | 4898 | |
Asias He | 7dac16c | 2013-05-08 10:57:29 +0800 | [diff] [blame] | 4899 | /* |
| 4900 | * kvm_arch_init makes sure there's at most one caller |
| 4901 | * for architectures that support multiple implementations, |
 | 4902 | * like Intel and AMD on x86.
Paolo Bonzini | 36343f6 | 2016-10-26 13:35:56 +0200 | [diff] [blame] | 4903 | * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
 | 4904 | * conflicts in case kvm is already set up for another implementation.
Asias He | 7dac16c | 2013-05-08 10:57:29 +0800 | [diff] [blame] | 4905 | */ |
Paolo Bonzini | 36343f6 | 2016-10-26 13:35:56 +0200 | [diff] [blame] | 4906 | r = kvm_irqfd_init(); |
| 4907 | if (r) |
| 4908 | goto out_irqfd; |
Asias He | 7dac16c | 2013-05-08 10:57:29 +0800 | [diff] [blame] | 4909 | |
Avi Kivity | 8437a617 | 2009-06-06 14:52:35 -0700 | [diff] [blame] | 4910 | if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 4911 | r = -ENOMEM; |
| 4912 | goto out_free_0; |
| 4913 | } |
| 4914 | |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 4915 | r = kvm_arch_hardware_setup(opaque); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4916 | if (r < 0) |
Miaohe Lin | faf0be2 | 2019-11-23 10:45:50 +0800 | [diff] [blame] | 4917 | goto out_free_1; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4918 | |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 4919 | c.ret = &r; |
| 4920 | c.opaque = opaque; |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 4921 | for_each_online_cpu(cpu) { |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 4922 | smp_call_function_single(cpu, check_processor_compat, &c, 1); |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 4923 | if (r < 0) |
Miaohe Lin | faf0be2 | 2019-11-23 10:45:50 +0800 | [diff] [blame] | 4924 | goto out_free_2; |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 4925 | } |
| 4926 | |
Thomas Gleixner | 73c1b41 | 2016-12-21 20:19:54 +0100 | [diff] [blame] | 4927 | r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting", |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 4928 | kvm_starting_cpu, kvm_dying_cpu); |
Avi Kivity | 774c47f | 2007-02-12 00:54:47 -0800 | [diff] [blame] | 4929 | if (r) |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 4930 | goto out_free_2; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4931 | register_reboot_notifier(&kvm_reboot_notifier); |
| 4932 | |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 4933 | /* A kmem cache lets us meet the alignment requirements of fx_save. */ |
Avi Kivity | 0ee75be | 2010-04-28 15:39:01 +0300 | [diff] [blame] | 4934 | if (!vcpu_align) |
| 4935 | vcpu_align = __alignof__(struct kvm_vcpu); |
Paolo Bonzini | 4651573 | 2017-10-26 15:45:46 +0200 | [diff] [blame] | 4936 | kvm_vcpu_cache = |
| 4937 | kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align, |
| 4938 | SLAB_ACCOUNT, |
| 4939 | offsetof(struct kvm_vcpu, arch), |
| 4940 | sizeof_field(struct kvm_vcpu, arch), |
| 4941 | NULL); |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 4942 | if (!kvm_vcpu_cache) { |
| 4943 | r = -ENOMEM; |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 4944 | goto out_free_3; |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 4945 | } |
| 4946 | |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4947 | r = kvm_async_pf_init(); |
| 4948 | if (r) |
| 4949 | goto out_free; |
| 4950 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4951 | kvm_chardev_ops.owner = module; |
Christian Borntraeger | 3d3aab1 | 2008-12-02 11:17:32 +0100 | [diff] [blame] | 4952 | kvm_vm_fops.owner = module; |
| 4953 | kvm_vcpu_fops.owner = module; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4954 | |
| 4955 | r = misc_register(&kvm_dev); |
| 4956 | if (r) { |
Xiubo Li | 1170adc | 2015-02-26 14:58:26 +0800 | [diff] [blame] | 4957 | pr_err("kvm: misc device register failed\n"); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4958 | goto out_unreg; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4959 | } |
| 4960 | |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 4961 | register_syscore_ops(&kvm_syscore_ops); |
| 4962 | |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 4963 | kvm_preempt_ops.sched_in = kvm_sched_in; |
| 4964 | kvm_preempt_ops.sched_out = kvm_sched_out; |
| 4965 | |
Greg Kroah-Hartman | 929f45e | 2018-05-29 18:22:04 +0200 | [diff] [blame] | 4966 | kvm_init_debug(); |
Darrick J. Wong | 0ea4ed8 | 2009-10-14 16:21:00 -0700 | [diff] [blame] | 4967 | |
Paolo Bonzini | 3c3c29f | 2014-09-24 13:02:46 +0200 | [diff] [blame] | 4968 | r = kvm_vfio_ops_init(); |
| 4969 | WARN_ON(r); |
| 4970 | |
Avi Kivity | c7addb9 | 2007-09-16 18:58:32 +0200 | [diff] [blame] | 4971 | return 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4972 | |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4973 | out_unreg: |
| 4974 | kvm_async_pf_deinit(); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4975 | out_free: |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 4976 | kmem_cache_destroy(kvm_vcpu_cache); |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 4977 | out_free_3: |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4978 | unregister_reboot_notifier(&kvm_reboot_notifier); |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 4979 | cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 4980 | out_free_2: |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 4981 | kvm_arch_hardware_unsetup(); |
Miaohe Lin | faf0be2 | 2019-11-23 10:45:50 +0800 | [diff] [blame] | 4982 | out_free_1: |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 4983 | free_cpumask_var(cpus_hardware_enabled); |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 4984 | out_free_0: |
Cornelia Huck | a0f155e | 2013-02-28 12:33:18 +0100 | [diff] [blame] | 4985 | kvm_irqfd_exit(); |
Paolo Bonzini | 36343f6 | 2016-10-26 13:35:56 +0200 | [diff] [blame] | 4986 | out_irqfd: |
Asias He | 7dac16c | 2013-05-08 10:57:29 +0800 | [diff] [blame] | 4987 | kvm_arch_exit(); |
| 4988 | out_fail: |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4989 | return r; |
| 4990 | } |
Zhang Xiantao | cb498ea | 2007-11-14 20:39:31 +0800 | [diff] [blame] | 4991 | EXPORT_SYMBOL_GPL(kvm_init); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4992 | |
Zhang Xiantao | cb498ea | 2007-11-14 20:39:31 +0800 | [diff] [blame] | 4993 | void kvm_exit(void) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4994 | { |
Janosch Frank | 4bd33b5 | 2015-10-14 12:37:35 +0200 | [diff] [blame] | 4995 | debugfs_remove_recursive(kvm_debugfs_dir); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4996 | misc_deregister(&kvm_dev); |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 4997 | kmem_cache_destroy(kvm_vcpu_cache); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4998 | kvm_async_pf_deinit(); |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 4999 | unregister_syscore_ops(&kvm_syscore_ops); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5000 | unregister_reboot_notifier(&kvm_reboot_notifier); |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 5001 | cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 5002 | on_each_cpu(hardware_disable_nolock, NULL, 1); |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 5003 | kvm_arch_hardware_unsetup(); |
Zhang Xiantao | f8c16bb | 2007-11-14 20:40:21 +0800 | [diff] [blame] | 5004 | kvm_arch_exit(); |
Cornelia Huck | a0f155e | 2013-02-28 12:33:18 +0100 | [diff] [blame] | 5005 | kvm_irqfd_exit(); |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 5006 | free_cpumask_var(cpus_hardware_enabled); |
Wanpeng Li | 571ee1b | 2014-10-09 18:30:08 +0800 | [diff] [blame] | 5007 | kvm_vfio_ops_exit(); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5008 | } |
Zhang Xiantao | cb498ea | 2007-11-14 20:39:31 +0800 | [diff] [blame] | 5009 | EXPORT_SYMBOL_GPL(kvm_exit); |
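 | | 
 | | /*
 | | * Sketch of how an arch module is expected to drive kvm_init()/
 | | * kvm_exit(); names are illustrative (on x86, kvm_intel and kvm_amd
 | | * do the equivalent with their own ops and vcpu structures):
 | | *
 | | *	static int __init my_arch_init(void)
 | | *	{
 | | *		return kvm_init(&my_init_ops, sizeof(struct my_vcpu),
 | | *				__alignof__(struct my_vcpu), THIS_MODULE);
 | | *	}
 | | *	module_init(my_arch_init);
 | | *
 | | *	static void __exit my_arch_exit(void)
 | | *	{
 | | *		kvm_exit();
 | | *	}
 | | *	module_exit(my_arch_exit);
 | | */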
Junaid Shahid | c57c804 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 5010 | |
| 5011 | struct kvm_vm_worker_thread_context { |
| 5012 | struct kvm *kvm; |
| 5013 | struct task_struct *parent; |
| 5014 | struct completion init_done; |
| 5015 | kvm_vm_thread_fn_t thread_fn; |
| 5016 | uintptr_t data; |
| 5017 | int err; |
| 5018 | }; |
| 5019 | |
| 5020 | static int kvm_vm_worker_thread(void *context) |
| 5021 | { |
| 5022 | /* |
| 5023 | * The init_context is allocated on the stack of the parent thread, so |
 | 5024 | * we have to locally copy anything that is needed beyond initialization.
| 5025 | */ |
| 5026 | struct kvm_vm_worker_thread_context *init_context = context; |
| 5027 | struct kvm *kvm = init_context->kvm; |
| 5028 | kvm_vm_thread_fn_t thread_fn = init_context->thread_fn; |
| 5029 | uintptr_t data = init_context->data; |
| 5030 | int err; |
| 5031 | |
| 5032 | err = kthread_park(current); |
| 5033 | /* kthread_park(current) is never supposed to return an error */ |
| 5034 | WARN_ON(err != 0); |
| 5035 | if (err) |
| 5036 | goto init_complete; |
| 5037 | |
| 5038 | err = cgroup_attach_task_all(init_context->parent, current); |
| 5039 | if (err) { |
| 5040 | kvm_err("%s: cgroup_attach_task_all failed with err %d\n", |
| 5041 | __func__, err); |
| 5042 | goto init_complete; |
| 5043 | } |
| 5044 | |
| 5045 | set_user_nice(current, task_nice(init_context->parent)); |
| 5046 | |
| 5047 | init_complete: |
| 5048 | init_context->err = err; |
| 5049 | complete(&init_context->init_done); |
| 5050 | init_context = NULL; |
| 5051 | |
| 5052 | if (err) |
| 5053 | return err; |
| 5054 | |
| 5055 | /* Wait to be woken up by the spawner before proceeding. */ |
| 5056 | kthread_parkme(); |
| 5057 | |
| 5058 | if (!kthread_should_stop()) |
| 5059 | err = thread_fn(kvm, data); |
| 5060 | |
| 5061 | return err; |
| 5062 | } |
| 5063 | |
| 5064 | int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, |
| 5065 | uintptr_t data, const char *name, |
| 5066 | struct task_struct **thread_ptr) |
| 5067 | { |
| 5068 | struct kvm_vm_worker_thread_context init_context = {}; |
| 5069 | struct task_struct *thread; |
| 5070 | |
| 5071 | *thread_ptr = NULL; |
| 5072 | init_context.kvm = kvm; |
| 5073 | init_context.parent = current; |
| 5074 | init_context.thread_fn = thread_fn; |
| 5075 | init_context.data = data; |
| 5076 | init_completion(&init_context.init_done); |
| 5077 | |
| 5078 | thread = kthread_run(kvm_vm_worker_thread, &init_context, |
| 5079 | "%s-%d", name, task_pid_nr(current)); |
| 5080 | if (IS_ERR(thread)) |
| 5081 | return PTR_ERR(thread); |
| 5082 | |
| 5083 | /* kthread_run is never supposed to return NULL */ |
| 5084 | WARN_ON(thread == NULL); |
| 5085 | |
| 5086 | wait_for_completion(&init_context.init_done); |
| 5087 | |
| 5088 | if (!init_context.err) |
| 5089 | *thread_ptr = thread; |
| 5090 | |
| 5091 | return init_context.err; |
| 5092 | } |
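 | | 
 | | /*
 | | * Illustrative caller, modeled on the x86 NX huge page recovery thread
 | | * (names here are hypothetical): the worker starts parked, so the
 | | * spawner unparks it once setup is complete and stops it on teardown.
 | | *
 | | *	static int my_worker_fn(struct kvm *kvm, uintptr_t data)
 | | *	{
 | | *		while (!kthread_should_stop())
 | | *			...periodic work, interruptible sleep...
 | | *		return 0;
 | | *	}
 | | *
 | | *	err = kvm_vm_create_worker_thread(kvm, my_worker_fn, 0,
 | | *					  "kvm-my-worker", &thread);
 | | *	if (!err)
 | | *		kthread_unpark(thread);
 | | *	...
 | | *	kthread_stop(thread);
 | | */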