// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "mmu_lock.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>

/* Worst case buffer size needed for holding an integer: "-2147483648" plus '\0'. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);

/* Default resets per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_MUTEX(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static const struct file_operations stat_fops_per_vm;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the open has been done by a 64bit task, and the KVM fd
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

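/*
 * Weak stub for the .invalidate_range hook; architectures that need to
 * react to raw HVA invalidations (rather than the range_start/range_end
 * pair below) provide their own implementation.
 */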
__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
						   unsigned long start, unsigned long end)
{
}

bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
{
	/*
	 * The metadata used by is_zone_device_page() to determine whether or
	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
	 * page_count() is zero to help detect bad usage of this helper.
	 */
	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
		return false;

	return is_zone_device_page(pfn_to_page(pfn));
}

bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
	/*
	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
	 * perspective they are "normal" pages, albeit with slightly different
	 * usage rules.
	 */
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn)) &&
		       !is_zero_pfn(pfn) &&
		       !kvm_is_zone_device_pfn(pfn);

	return true;
}

bool kvm_is_transparent_hugepage(kvm_pfn_t pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (!PageTransCompoundMap(page))
		return false;

	return is_transparent_hugepage(compound_head(page));
}

/*
 * Switches to the specified vcpu, until a matching vcpu_put().
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	__this_cpu_write(kvm_running_vcpu, vcpu);
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	__this_cpu_write(kvm_running_vcpu, NULL);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}

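/*
 * Empty IPI handler: the interrupt itself, not the handler body, does the
 * work.  Kicking a CPU that is running a vCPU forces a VM-exit, after
 * which requests set via kvm_make_request() are noticed before the guest
 * is resumed.
 */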
static void ack_flush(void *_completed)
{
}

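/*
 * Send the ack_flush IPI to @cpus (all online CPUs when NULL), optionally
 * waiting for the handlers to complete.  Returns false if there was no
 * CPU to kick.
 */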
static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
{
	if (unlikely(!cpus))
		cpus = cpu_online_mask;

	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_flush, NULL, wait);
	return true;
}

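/*
 * Make @req pending on every vCPU in @vcpu_bitmap (every vCPU of @kvm when
 * the bitmap is NULL), skipping @except, and kick the subset of CPUs that
 * need an IPI.  @tmp is caller-provided scratch space for the cpumask.
 * Returns true if at least one CPU was kicked.
 */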
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 struct kvm_vcpu *except,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
{
	int i, cpu, me;
	struct kvm_vcpu *vcpu;
	bool called;

	me = get_cpu();

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) ||
		    vcpu == except)
			continue;

		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
			continue;

		if (tmp != NULL && cpu != -1 && cpu != me &&
		    kvm_request_needs_ipi(vcpu, req))
			__cpumask_set_cpu(cpu, tmp);
	}

	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except)
{
	cpumask_var_t cpus;
	bool called;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);

	free_cpumask_var(cpus);
	return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	return kvm_make_all_cpus_request_except(kvm, req, NULL);
}
EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	/*
	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
	 * kvm_make_all_cpus_request.
	 */
	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode.  Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode.  We reuse that
	 * barrier here.
	 */
	if (!kvm_arch_flush_remote_tlb(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.generic.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{
	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
	else
		return (void *)__get_free_page(gfp_flags);
}

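/*
 * Fill @mc up to at least @min objects while sleeping is still allowed,
 * so that later kvm_mmu_memory_cache_alloc() calls (typically made with
 * the MMU lock held) can be satisfied without blocking or failing.
 */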
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	void *obj;

	if (mc->nobjs >= min)
		return 0;
	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
		obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs;
}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs) {
		if (mc->kmem_cache)
			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
		else
			free_page((unsigned long)mc->objects[--mc->nobjs]);
	}
}

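/*
 * Pop an object from @mc.  Callers are expected to have topped the cache
 * up beforehand; the GFP_ATOMIC path is a warned-about fallback for a
 * missed top-up, and BUG_ON() fires if even that allocation fails.
 */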
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	if (WARN_ON(!mc->nobjs))
		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
	else
		p = mc->objects[--mc->nobjs];
	BUG_ON(!p);
	return p;
}
#endif

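/* Arch-independent initialization of a newly allocated vCPU. */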
static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	rcuwait_init(&vcpu->wait);
	kvm_async_pf_vcpu_init(vcpu);

	vcpu->pre_pcpu = -1;
	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;
	vcpu->ready = false;
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
	vcpu->last_used_slot = 0;
}

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_dirty_ring_free(&vcpu->dirty_ring);
	kvm_arch_vcpu_destroy(vcpu);

	/*
	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
	 * the vcpu->pid pointer, and at destruction time all file descriptors
	 * are already gone.
	 */
	put_pid(rcu_dereference_protected(vcpu->pid, 1));

	free_page((unsigned long)vcpu->run);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start, unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
	srcu_read_unlock(&kvm->srcu, idx);
}

typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);

typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
			     unsigned long end);

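/*
 * Arguments for one pass over the memslots on behalf of an mmu_notifier
 * event: the HVA range affected, the handler invoked for each overlapping
 * memslot (as a gfn range), an optional callback run once under the MMU
 * lock, and flags controlling TLB flushing and whether blocking is allowed.
 */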
struct kvm_hva_range {
	unsigned long start;
	unsigned long end;
	pte_t pte;
	hva_handler_t handler;
	on_lock_fn_t on_lock;
	bool flush_on_ret;
	bool may_block;
};

/*
 * Use a dedicated stub instead of NULL to indicate that there is no callback
 * function/handler.  The compiler technically can't guarantee that a real
 * function will have a non-zero address, and so it will generate code to
 * check for !NULL, whereas comparing against a stub will be elided at compile
 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
 */
static void kvm_null_fn(void)
{

}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)

static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
						  const struct kvm_hva_range *range)
{
	bool ret = false, locked = false;
	struct kvm_gfn_range gfn_range;
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int i, idx;

	/* A null handler is allowed if and only if on_lock() is provided. */
	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
			 IS_KVM_NULL_FN(range->handler)))
		return 0;

	idx = srcu_read_lock(&kvm->srcu);

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(slot, slots) {
			unsigned long hva_start, hva_end;

			hva_start = max(range->start, slot->userspace_addr);
			hva_end = min(range->end, slot->userspace_addr +
						  (slot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;

			/*
			 * To optimize for the likely case where the address
			 * range is covered by zero or one memslots, don't
			 * bother making these conditional (to avoid writes on
			 * the second or later invocation of the handler).
			 */
			gfn_range.pte = range->pte;
			gfn_range.may_block = range->may_block;

			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
			gfn_range.slot = slot;

			if (!locked) {
				locked = true;
				KVM_MMU_LOCK(kvm);
				if (!IS_KVM_NULL_FN(range->on_lock))
					range->on_lock(kvm, range->start, range->end);
				if (IS_KVM_NULL_FN(range->handler))
					break;
			}
			ret |= range->handler(kvm, &gfn_range);
		}
	}

	if (range->flush_on_ret && (ret || kvm->tlbs_dirty))
		kvm_flush_remote_tlbs(kvm);

	if (locked)
		KVM_MMU_UNLOCK(kvm);

	srcu_read_unlock(&kvm->srcu, idx);

	/* The notifiers are averse to booleans. :-( */
	return (int)ret;
}

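/*
 * Wrapper for notifier hooks that only need a handler over an HVA range;
 * sptes zapped by the handler are flushed before returning.
 */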
static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
						unsigned long start,
						unsigned long end,
						pte_t pte,
						hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= pte,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= true,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
							 unsigned long start,
							 unsigned long end,
							 hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= __pte(0),
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= false,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	trace_kvm_set_spte_hva(address);

	/*
	 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
	 * If mmu_notifier_count is zero, then no in-progress invalidations,
	 * including this one, found a relevant memslot at start(); rechecking
	 * memslots here is unnecessary.  Note, a false positive (count elevated
	 * by a different invalidation) is sub-optimal but functionally ok.
	 */
	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
	if (!READ_ONCE(kvm->mmu_notifier_count))
		return;

	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
}

void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
			    unsigned long end)
{
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	if (likely(kvm->mmu_notifier_count == 1)) {
		kvm->mmu_notifier_range_start = start;
		kvm->mmu_notifier_range_end = end;
	} else {
		/*
		 * Fully tracking multiple concurrent ranges has diminishing
		 * returns.  Keep things simple and just find the minimal range
		 * which includes the current and new ranges.  As there won't be
		 * enough information to subtract a range after its invalidate
		 * completes, any ranges invalidated concurrently will
		 * accumulate and persist until all outstanding invalidates
		 * complete.
		 */
		kvm->mmu_notifier_range_start =
			min(kvm->mmu_notifier_range_start, start);
		kvm->mmu_notifier_range_end =
			max(kvm->mmu_notifier_range_end, end);
	}
}
EXPORT_SYMBOL_GPL(kvm_inc_notifier_count);

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),
		.handler	= kvm_unmap_gfn_range,
		.on_lock	= kvm_inc_notifier_count,
		.flush_on_ret	= true,
		.may_block	= mmu_notifier_range_blockable(range),
	};

	trace_kvm_unmap_hva_range(range->start, range->end);

	/*
	 * Prevent memslot modification between range_start() and range_end()
	 * so that conditionally locking provides the same result in both
	 * functions.  Without that guarantee, the mmu_notifier_count
	 * adjustments will be imbalanced.
	 *
	 * Pairs with the decrement in range_end().
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	kvm->mn_active_invalidate_count++;
	spin_unlock(&kvm->mn_invalidate_lock);

	__kvm_handle_hva_range(kvm, &hva_range);

	return 0;
}

void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
			    unsigned long end)
{
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
}
EXPORT_SYMBOL_GPL(kvm_dec_notifier_count);

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),
		.handler	= (void *)kvm_null_fn,
		.on_lock	= kvm_dec_notifier_count,
		.flush_on_ret	= false,
		.may_block	= mmu_notifier_range_blockable(range),
	};
	bool wake;

	__kvm_handle_hva_range(kvm, &hva_range);

	/* Pairs with the increment in range_start(). */
	spin_lock(&kvm->mn_invalidate_lock);
	wake = (--kvm->mn_active_invalidate_count == 0);
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * There can only be one waiter, since the wait happens under
	 * slots_lock.
	 */
	if (wake)
		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	trace_kvm_age_hva(start, end);

	return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	trace_kvm_age_hva(start, end);

	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead.  If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence.  If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	trace_kvm_test_age_hva(address);

	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
					     kvm_test_age_gfn);
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_range	= kvm_mmu_notifier_invalidate_range,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
				unsigned long state,
				void *unused)
{
	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);

	return kvm_arch_pm_notifier(kvm, state);
}

static void kvm_init_pm_notifier(struct kvm *kvm)
{
	kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
	/* Suspend KVM before we suspend ftrace, RCU, etc. */
	kvm->pm_notifier.priority = INT_MAX;
	register_pm_notifier(&kvm->pm_notifier);
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
	unregister_pm_notifier(&kvm->pm_notifier);
}
#else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
static void kvm_init_pm_notifier(struct kvm *kvm)
{
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
}
#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */

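/*
 * Allocate an empty memslot array for one address space; every slot starts
 * out unused, indicated by id_to_index == -1.
 */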
static struct kvm_memslots *kvm_alloc_memslots(void)
{
	int i;
	struct kvm_memslots *slots;

	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
	if (!slots)
		return NULL;

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = -1;

	return slots;
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_destroy_dirty_bitmap(slot);

	kvm_arch_free_memslot(kvm, slot);

	slot->flags = 0;
	slot->npages = 0;
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;

	if (!slots)
		return;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot);

	kvfree(slots);
}

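/*
 * Instantaneous stats are read-only; cumulative and peak stats get 0644 so
 * that userspace can reset them by writing to the debugfs file.
 */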
static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
{
	switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
	case KVM_STATS_TYPE_INSTANT:
		return 0444;
	case KVM_STATS_TYPE_CUMULATIVE:
	case KVM_STATS_TYPE_PEAK:
	default:
		return 0644;
	}
}

static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
	int i;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (!kvm->debugfs_dentry)
		return;

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);
	}
}

| 918 | static int kvm_create_vm_debugfs(struct kvm *kvm, int fd) |
| 919 | { |
Paolo Bonzini | 85cd39a | 2021-08-04 05:28:52 -0400 | [diff] [blame] | 920 | static DEFINE_MUTEX(kvm_debugfs_lock); |
| 921 | struct dentry *dent; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 922 | char dir_name[ITOA_MAX_LEN * 2]; |
| 923 | struct kvm_stat_data *stat_data; |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 924 | const struct _kvm_stats_desc *pdesc; |
Peter Xu | 3165af7 | 2021-07-30 18:04:49 -0400 | [diff] [blame] | 925 | int i, ret; |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 926 | int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + |
| 927 | kvm_vcpu_stats_header.num_desc; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 928 | |
| 929 | if (!debugfs_initialized()) |
| 930 | return 0; |
| 931 | |
| 932 | snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd); |
Paolo Bonzini | 85cd39a | 2021-08-04 05:28:52 -0400 | [diff] [blame] | 933 | mutex_lock(&kvm_debugfs_lock); |
| 934 | dent = debugfs_lookup(dir_name, kvm_debugfs_dir); |
| 935 | if (dent) { |
| 936 | pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name); |
| 937 | dput(dent); |
| 938 | mutex_unlock(&kvm_debugfs_lock); |
| 939 | return 0; |
| 940 | } |
| 941 | dent = debugfs_create_dir(dir_name, kvm_debugfs_dir); |
| 942 | mutex_unlock(&kvm_debugfs_lock); |
| 943 | if (IS_ERR(dent)) |
| 944 | return 0; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 945 | |
Paolo Bonzini | 85cd39a | 2021-08-04 05:28:52 -0400 | [diff] [blame] | 946 | kvm->debugfs_dentry = dent; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 947 | kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, |
| 948 | sizeof(*kvm->debugfs_stat_data), |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 949 | GFP_KERNEL_ACCOUNT); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 950 | if (!kvm->debugfs_stat_data) |
| 951 | return -ENOMEM; |
| 952 | |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 953 | for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { |
| 954 | pdesc = &kvm_vm_stats_desc[i]; |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 955 | stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 956 | if (!stat_data) |
| 957 | return -ENOMEM; |
| 958 | |
| 959 | stat_data->kvm = kvm; |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 960 | stat_data->desc = pdesc; |
| 961 | stat_data->kind = KVM_STAT_VM; |
| 962 | kvm->debugfs_stat_data[i] = stat_data; |
| 963 | debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), |
| 964 | kvm->debugfs_dentry, stat_data, |
| 965 | &stat_fops_per_vm); |
| 966 | } |
| 967 | |
| 968 | for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { |
| 969 | pdesc = &kvm_vcpu_stats_desc[i]; |
| 970 | stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT); |
| 971 | if (!stat_data) |
| 972 | return -ENOMEM; |
| 973 | |
| 974 | stat_data->kvm = kvm; |
| 975 | stat_data->desc = pdesc; |
| 976 | stat_data->kind = KVM_STAT_VCPU; |
Pavel Skripkin | 004d62e | 2021-07-01 22:55:00 +0300 | [diff] [blame] | 977 | kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data; |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 978 | debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 979 | kvm->debugfs_dentry, stat_data, |
| 980 | &stat_fops_per_vm); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 981 | } |
Peter Xu | 3165af7 | 2021-07-30 18:04:49 -0400 | [diff] [blame] | 982 | |
| 983 | ret = kvm_arch_create_vm_debugfs(kvm); |
| 984 | if (ret) { |
| 985 | kvm_destroy_vm_debugfs(kvm); |
| 986 | return ret; |
| 987 | } |
| 988 | |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 989 | return 0; |
| 990 | } |
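/*
 * Editor's illustrative note, not part of the original source: the
 * directory name built above is "<pid>-<fd>", so a VM created by a
 * process with PID 1234 whose VM file descriptor is 5 would appear as
 *
 *	/sys/kernel/debug/kvm/1234-5/
 *
 * with one file per stat descriptor underneath, using the mode returned
 * by kvm_stats_debugfs_mode() (read-only for instant stats, writable
 * for cumulative/peak stats so they can be cleared).
 */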
| 991 | |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 992 | /* |
| 993 | * Called after the VM is otherwise initialized, but just before adding it to |
| 994 | * the vm_list. |
| 995 | */ |
| 996 | int __weak kvm_arch_post_init_vm(struct kvm *kvm) |
| 997 | { |
| 998 | return 0; |
| 999 | } |
| 1000 | |
| 1001 | /* |
| 1002 | * Called just after removing the VM from the vm_list, but before doing any |
| 1003 | * other destruction. |
| 1004 | */ |
| 1005 | void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm) |
| 1006 | { |
| 1007 | } |
| 1008 | |
Peter Xu | 3165af7 | 2021-07-30 18:04:49 -0400 | [diff] [blame] | 1009 | /* |
| 1010 |  * Called after the per-VM debugfs directory has been created.  At that point |
| 1011 |  * kvm->debugfs_dentry is already set up, so arch-specific debugfs entries can |
| 1012 |  * be created under it.  Cleanup is done automatically and recursively by |
| 1013 |  * kvm_destroy_vm_debugfs(), so a per-arch destroy interface is not needed. |
| 1014 | */ |
| 1015 | int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm) |
| 1016 | { |
| 1017 | return 0; |
| 1018 | } |
| 1019 | |
Carsten Otte | e08b963 | 2012-01-04 10:25:20 +0100 | [diff] [blame] | 1020 | static struct kvm *kvm_create_vm(unsigned long type) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1021 | { |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 1022 | struct kvm *kvm = kvm_arch_alloc_vm(); |
Jim Mattson | 9121923 | 2019-10-24 16:03:26 -0700 | [diff] [blame] | 1023 | int r = -ENOMEM; |
| 1024 | int i; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1025 | |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 1026 | if (!kvm) |
| 1027 | return ERR_PTR(-ENOMEM); |
| 1028 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 1029 | KVM_MMU_LOCK_INIT(kvm); |
Vegard Nossum | f1f1007 | 2017-02-27 14:30:07 -0800 | [diff] [blame] | 1030 | mmgrab(current->mm); |
Paolo Bonzini | e9ad4ec | 2016-03-21 10:15:25 +0100 | [diff] [blame] | 1031 | kvm->mm = current->mm; |
| 1032 | kvm_eventfd_init(kvm); |
| 1033 | mutex_init(&kvm->lock); |
| 1034 | mutex_init(&kvm->irq_lock); |
| 1035 | mutex_init(&kvm->slots_lock); |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1036 | mutex_init(&kvm->slots_arch_lock); |
Paolo Bonzini | 52ac8b3 | 2021-05-27 08:09:15 -0400 | [diff] [blame] | 1037 | spin_lock_init(&kvm->mn_invalidate_lock); |
| 1038 | rcuwait_init(&kvm->mn_memslots_update_rcuwait); |
| 1039 | |
Paolo Bonzini | e9ad4ec | 2016-03-21 10:15:25 +0100 | [diff] [blame] | 1040 | INIT_LIST_HEAD(&kvm->devices); |
| 1041 | |
Jim Mattson | 9121923 | 2019-10-24 16:03:26 -0700 | [diff] [blame] | 1042 | BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); |
| 1043 | |
Paolo Bonzini | 8a44119 | 2019-11-04 12:16:49 +0100 | [diff] [blame] | 1044 | if (init_srcu_struct(&kvm->srcu)) |
| 1045 | goto out_err_no_srcu; |
| 1046 | if (init_srcu_struct(&kvm->irq_srcu)) |
| 1047 | goto out_err_no_irq_srcu; |
| 1048 | |
Paolo Bonzini | e2d3fca | 2019-11-04 13:23:53 +0100 | [diff] [blame] | 1049 | refcount_set(&kvm->users_count, 1); |
Jim Mattson | 9121923 | 2019-10-24 16:03:26 -0700 | [diff] [blame] | 1050 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { |
| 1051 | struct kvm_memslots *slots = kvm_alloc_memslots(); |
| 1052 | |
| 1053 | if (!slots) |
Jim Mattson | a97b0e7 | 2019-10-25 13:34:58 +0200 | [diff] [blame] | 1054 | goto out_err_no_arch_destroy_vm; |
Jim Mattson | 9121923 | 2019-10-24 16:03:26 -0700 | [diff] [blame] | 1055 | /* Generations must be different for each address space. */ |
| 1056 | slots->generation = i; |
| 1057 | rcu_assign_pointer(kvm->memslots[i], slots); |
| 1058 | } |
| 1059 | |
| 1060 | for (i = 0; i < KVM_NR_BUSES; i++) { |
| 1061 | rcu_assign_pointer(kvm->buses[i], |
| 1062 | kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT)); |
| 1063 | if (!kvm->buses[i]) |
Jim Mattson | a97b0e7 | 2019-10-25 13:34:58 +0200 | [diff] [blame] | 1064 | goto out_err_no_arch_destroy_vm; |
Jim Mattson | 9121923 | 2019-10-24 16:03:26 -0700 | [diff] [blame] | 1065 | } |
| 1066 | |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 1067 | kvm->max_halt_poll_ns = halt_poll_ns; |
| 1068 | |
Carsten Otte | e08b963 | 2012-01-04 10:25:20 +0100 | [diff] [blame] | 1069 | r = kvm_arch_init_vm(kvm, type); |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 1070 | if (r) |
Jim Mattson | a97b0e7 | 2019-10-25 13:34:58 +0200 | [diff] [blame] | 1071 | goto out_err_no_arch_destroy_vm; |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 1072 | |
| 1073 | r = hardware_enable_all(); |
| 1074 | if (r) |
Christian Borntraeger | 719d93c | 2014-01-16 13:44:20 +0100 | [diff] [blame] | 1075 | goto out_err_no_disable; |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 1076 | |
Paolo Bonzini | c77dcac | 2014-08-06 14:24:45 +0200 | [diff] [blame] | 1077 | #ifdef CONFIG_HAVE_KVM_IRQFD |
Gleb Natapov | 136bdfe | 2009-08-24 11:54:23 +0300 | [diff] [blame] | 1078 | INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); |
Avi Kivity | 75858a8 | 2009-01-04 17:10:50 +0200 | [diff] [blame] | 1079 | #endif |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1080 | |
Mike Waychison | 74b5c5b | 2011-06-03 13:04:53 -0700 | [diff] [blame] | 1081 | r = kvm_init_mmu_notifier(kvm); |
| 1082 | if (r) |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 1083 | goto out_err_no_mmu_notifier; |
| 1084 | |
| 1085 | r = kvm_arch_post_init_vm(kvm); |
| 1086 | if (r) |
Mike Waychison | 74b5c5b | 2011-06-03 13:04:53 -0700 | [diff] [blame] | 1087 | goto out_err; |
| 1088 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 1089 | mutex_lock(&kvm_lock); |
Rusty Russell | 5e58cfe | 2007-07-23 17:08:21 +1000 | [diff] [blame] | 1090 | list_add(&kvm->vm_list, &vm_list); |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 1091 | mutex_unlock(&kvm_lock); |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 1092 | |
Peter Zijlstra | 2ecd9d2 | 2015-07-03 18:53:58 +0200 | [diff] [blame] | 1093 | preempt_notifier_inc(); |
Sergey Senozhatsky | 2fdef3a | 2021-06-06 11:10:44 +0900 | [diff] [blame] | 1094 | kvm_init_pm_notifier(kvm); |
Peter Zijlstra | 2ecd9d2 | 2015-07-03 18:53:58 +0200 | [diff] [blame] | 1095 | |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 1096 | return kvm; |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 1097 | |
| 1098 | out_err: |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 1099 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
| 1100 | if (kvm->mmu_notifier.ops) |
| 1101 | mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); |
| 1102 | #endif |
| 1103 | out_err_no_mmu_notifier: |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 1104 | hardware_disable_all(); |
Christian Borntraeger | 719d93c | 2014-01-16 13:44:20 +0100 | [diff] [blame] | 1105 | out_err_no_disable: |
Jim Mattson | a97b0e7 | 2019-10-25 13:34:58 +0200 | [diff] [blame] | 1106 | kvm_arch_destroy_vm(kvm); |
Jim Mattson | a97b0e7 | 2019-10-25 13:34:58 +0200 | [diff] [blame] | 1107 | out_err_no_arch_destroy_vm: |
Paolo Bonzini | e2d3fca | 2019-11-04 13:23:53 +0100 | [diff] [blame] | 1108 | WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 1109 | for (i = 0; i < KVM_NR_BUSES; i++) |
Paolo Bonzini | 3898da9 | 2017-08-02 17:55:54 +0200 | [diff] [blame] | 1110 | kfree(kvm_get_bus(kvm, i)); |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1111 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) |
Paolo Bonzini | 3898da9 | 2017-08-02 17:55:54 +0200 | [diff] [blame] | 1112 | kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); |
Paolo Bonzini | 8a44119 | 2019-11-04 12:16:49 +0100 | [diff] [blame] | 1113 | cleanup_srcu_struct(&kvm->irq_srcu); |
| 1114 | out_err_no_irq_srcu: |
| 1115 | cleanup_srcu_struct(&kvm->srcu); |
| 1116 | out_err_no_srcu: |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 1117 | kvm_arch_free_vm(kvm); |
Paolo Bonzini | e9ad4ec | 2016-03-21 10:15:25 +0100 | [diff] [blame] | 1118 | mmdrop(current->mm); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 1119 | return ERR_PTR(r); |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 1120 | } |
| 1121 | |
Scott Wood | 07f0a7b | 2013-04-25 14:11:23 +0000 | [diff] [blame] | 1122 | static void kvm_destroy_devices(struct kvm *kvm) |
| 1123 | { |
Geliang Tang | e6e3b5a | 2016-01-01 19:47:12 +0800 | [diff] [blame] | 1124 | struct kvm_device *dev, *tmp; |
Scott Wood | 07f0a7b | 2013-04-25 14:11:23 +0000 | [diff] [blame] | 1125 | |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 1126 | /* |
| 1127 | * We do not need to take the kvm->lock here, because nobody else |
| 1128 | * has a reference to the struct kvm at this point and therefore |
| 1129 | * cannot access the devices list anyhow. |
| 1130 | */ |
Geliang Tang | e6e3b5a | 2016-01-01 19:47:12 +0800 | [diff] [blame] | 1131 | list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { |
| 1132 | list_del(&dev->vm_node); |
Scott Wood | 07f0a7b | 2013-04-25 14:11:23 +0000 | [diff] [blame] | 1133 | dev->ops->destroy(dev); |
| 1134 | } |
| 1135 | } |
| 1136 | |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 1137 | static void kvm_destroy_vm(struct kvm *kvm) |
| 1138 | { |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 1139 | int i; |
Avi Kivity | 6d4e4c4 | 2007-11-21 16:41:05 +0200 | [diff] [blame] | 1140 | struct mm_struct *mm = kvm->mm; |
| 1141 | |
Sergey Senozhatsky | 2fdef3a | 2021-06-06 11:10:44 +0900 | [diff] [blame] | 1142 | kvm_destroy_pm_notifier(kvm); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 1143 | kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 1144 | kvm_destroy_vm_debugfs(kvm); |
Sheng Yang | ad8ba2c | 2009-01-06 10:03:02 +0800 | [diff] [blame] | 1145 | kvm_arch_sync_events(kvm); |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 1146 | mutex_lock(&kvm_lock); |
Avi Kivity | 133de90 | 2007-02-12 00:54:44 -0800 | [diff] [blame] | 1147 | list_del(&kvm->vm_list); |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 1148 | mutex_unlock(&kvm_lock); |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 1149 | kvm_arch_pre_destroy_vm(kvm); |
| 1150 | |
Avi Kivity | 399ec80 | 2008-11-19 13:58:46 +0200 | [diff] [blame] | 1151 | kvm_free_irq_routing(kvm); |
Peter Xu | df630b8 | 2017-03-15 16:01:17 +0800 | [diff] [blame] | 1152 | for (i = 0; i < KVM_NR_BUSES; i++) { |
Paolo Bonzini | 3898da9 | 2017-08-02 17:55:54 +0200 | [diff] [blame] | 1153 | struct kvm_io_bus *bus = kvm_get_bus(kvm, i); |
Christian Borntraeger | 4a12f95 | 2017-07-07 10:51:38 +0200 | [diff] [blame] | 1154 | |
Christian Borntraeger | 4a12f95 | 2017-07-07 10:51:38 +0200 | [diff] [blame] | 1155 | if (bus) |
| 1156 | kvm_io_bus_destroy(bus); |
Peter Xu | df630b8 | 2017-03-15 16:01:17 +0800 | [diff] [blame] | 1157 | kvm->buses[i] = NULL; |
| 1158 | } |
Avi Kivity | 980da6c | 2009-12-20 15:13:43 +0200 | [diff] [blame] | 1159 | kvm_coalesced_mmio_free(kvm); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1160 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
| 1161 | mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); |
Paolo Bonzini | 52ac8b3 | 2021-05-27 08:09:15 -0400 | [diff] [blame] | 1162 | /* |
| 1163 | * At this point, pending calls to invalidate_range_start() |
| 1164 | * have completed but no more MMU notifiers will run, so |
| 1165 | * mn_active_invalidate_count may remain unbalanced. |
| 1166 | * No threads can be waiting in install_new_memslots as the |
| 1167 | * last reference on KVM has been dropped, but freeing |
| 1168 | * memslots would deadlock without this manual intervention. |
| 1169 | */ |
| 1170 | WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait)); |
| 1171 | kvm->mn_active_invalidate_count = 0; |
Gleb Natapov | f00be0c | 2009-03-19 12:20:36 +0200 | [diff] [blame] | 1172 | #else |
Marcelo Tosatti | 2df72e9 | 2012-08-24 15:54:57 -0300 | [diff] [blame] | 1173 | kvm_arch_flush_shadow_all(kvm); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1174 | #endif |
Zhang Xiantao | d19a9cd | 2007-11-18 18:43:45 +0800 | [diff] [blame] | 1175 | kvm_arch_destroy_vm(kvm); |
Scott Wood | 07f0a7b | 2013-04-25 14:11:23 +0000 | [diff] [blame] | 1176 | kvm_destroy_devices(kvm); |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1177 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) |
Paolo Bonzini | 3898da9 | 2017-08-02 17:55:54 +0200 | [diff] [blame] | 1178 | kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); |
Paolo Bonzini | 820b3fc | 2014-06-03 13:44:17 +0200 | [diff] [blame] | 1179 | cleanup_srcu_struct(&kvm->irq_srcu); |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 1180 | cleanup_srcu_struct(&kvm->srcu); |
| 1181 | kvm_arch_free_vm(kvm); |
Peter Zijlstra | 2ecd9d2 | 2015-07-03 18:53:58 +0200 | [diff] [blame] | 1182 | preempt_notifier_dec(); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 1183 | hardware_disable_all(); |
Avi Kivity | 6d4e4c4 | 2007-11-21 16:41:05 +0200 | [diff] [blame] | 1184 | mmdrop(mm); |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 1185 | } |
| 1186 | |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 1187 | void kvm_get_kvm(struct kvm *kvm) |
| 1188 | { |
Elena Reshetova | e3736c3 | 2017-02-20 13:06:21 +0200 | [diff] [blame] | 1189 | refcount_inc(&kvm->users_count); |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 1190 | } |
| 1191 | EXPORT_SYMBOL_GPL(kvm_get_kvm); |
| 1192 | |
Peter Xu | 605c713 | 2021-06-25 11:32:07 -0400 | [diff] [blame] | 1193 | /* |
| 1194 |  * Make sure the VM is not in the middle of destruction; this is a safe |
| 1195 |  * version of kvm_get_kvm().  Returns true if kvm was referenced successfully, false otherwise. |
| 1196 | */ |
| 1197 | bool kvm_get_kvm_safe(struct kvm *kvm) |
| 1198 | { |
| 1199 | return refcount_inc_not_zero(&kvm->users_count); |
| 1200 | } |
| 1201 | EXPORT_SYMBOL_GPL(kvm_get_kvm_safe); |
| 1202 | |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 1203 | void kvm_put_kvm(struct kvm *kvm) |
| 1204 | { |
Elena Reshetova | e3736c3 | 2017-02-20 13:06:21 +0200 | [diff] [blame] | 1205 | if (refcount_dec_and_test(&kvm->users_count)) |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 1206 | kvm_destroy_vm(kvm); |
| 1207 | } |
| 1208 | EXPORT_SYMBOL_GPL(kvm_put_kvm); |
| 1209 | |
Sean Christopherson | 149487b | 2019-10-21 15:58:42 -0700 | [diff] [blame] | 1210 | /* |
| 1211 | * Used to put a reference that was taken on behalf of an object associated |
| 1212 | * with a user-visible file descriptor, e.g. a vcpu or device, if installation |
| 1213 | * of the new file descriptor fails and the reference cannot be transferred to |
| 1214 | * its final owner. In such cases, the caller is still actively using @kvm and |
| 1215 | * will fail miserably if the refcount unexpectedly hits zero. |
| 1216 | */ |
| 1217 | void kvm_put_kvm_no_destroy(struct kvm *kvm) |
| 1218 | { |
| 1219 | WARN_ON(refcount_dec_and_test(&kvm->users_count)); |
| 1220 | } |
| 1221 | EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy); |
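/*
 * Illustrative sketch, not from this file: the usage pattern the comment
 * above describes.  The helper names marked "example_" are hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_create_vcpu_fd(struct kvm *kvm)
{
	int fd;

	kvm_get_kvm(kvm);			/* reference owned by the new fd */
	fd = example_install_fd(kvm);		/* hypothetical fd installation */
	if (fd < 0)
		/*
		 * The fd was never installed, so the reference cannot be
		 * transferred; drop it without risking destruction, since
		 * the caller still holds its own reference.
		 */
		kvm_put_kvm_no_destroy(kvm);
	return fd;
}
#endif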
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 1222 | |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 1223 | static int kvm_vm_release(struct inode *inode, struct file *filp) |
| 1224 | { |
| 1225 | struct kvm *kvm = filp->private_data; |
| 1226 | |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 1227 | kvm_irqfd_release(kvm); |
| 1228 | |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 1229 | kvm_put_kvm(kvm); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1230 | return 0; |
| 1231 | } |
| 1232 | |
Takuya Yoshikawa | 515a012 | 2010-10-27 18:23:54 +0900 | [diff] [blame] | 1233 | /* |
| 1234 | * Allocation size is twice as large as the actual dirty bitmap size. |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1235 | * See kvm_vm_ioctl_get_dirty_log() why this is needed. |
Takuya Yoshikawa | 515a012 | 2010-10-27 18:23:54 +0900 | [diff] [blame] | 1236 | */ |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 1237 | static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) |
Takuya Yoshikawa | a36a57b1 | 2010-10-27 18:22:19 +0900 | [diff] [blame] | 1238 | { |
Takuya Yoshikawa | 515a012 | 2010-10-27 18:23:54 +0900 | [diff] [blame] | 1239 | unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); |
Takuya Yoshikawa | a36a57b1 | 2010-10-27 18:22:19 +0900 | [diff] [blame] | 1240 | |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 1241 | memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT); |
Takuya Yoshikawa | a36a57b1 | 2010-10-27 18:22:19 +0900 | [diff] [blame] | 1242 | if (!memslot->dirty_bitmap) |
| 1243 | return -ENOMEM; |
| 1244 | |
Takuya Yoshikawa | a36a57b1 | 2010-10-27 18:22:19 +0900 | [diff] [blame] | 1245 | return 0; |
| 1246 | } |
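/*
 * Illustrative numbers, not from the original source: for a 1 GiB slot
 * with 4 KiB pages, npages = 262144 and the bitmap proper needs
 * 262144 / 8 = 32 KiB, so this function allocates 64 KiB.  The second
 * half is the temporary buffer referred to by the comment above.
 */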
| 1247 | |
Xiao Guangrong | bf3e05b | 2011-11-24 17:40:57 +0800 | [diff] [blame] | 1248 | /* |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1249 | * Delete a memslot by decrementing the number of used slots and shifting all |
| 1250 | * other entries in the array forward one spot. |
Xiao Guangrong | bf3e05b | 2011-11-24 17:40:57 +0800 | [diff] [blame] | 1251 | */ |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1252 | static inline void kvm_memslot_delete(struct kvm_memslots *slots, |
| 1253 | struct kvm_memory_slot *memslot) |
Xiao Guangrong | bf3e05b | 2011-11-24 17:40:57 +0800 | [diff] [blame] | 1254 | { |
Igor Mammedov | 063584d | 2014-11-13 23:00:13 +0000 | [diff] [blame] | 1255 | struct kvm_memory_slot *mslots = slots->memslots; |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1256 | int i; |
Xiao Guangrong | f85e2cb | 2011-11-24 17:41:54 +0800 | [diff] [blame] | 1257 | |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1258 | if (WARN_ON(slots->id_to_index[memslot->id] == -1)) |
| 1259 | return; |
Igor Mammedov | 0e60b07 | 2014-12-01 17:29:26 +0000 | [diff] [blame] | 1260 | |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1261 | slots->used_slots--; |
| 1262 | |
David Matlack | 8768927 | 2021-08-04 22:28:38 +0000 | [diff] [blame] | 1263 | if (atomic_read(&slots->last_used_slot) >= slots->used_slots) |
| 1264 | atomic_set(&slots->last_used_slot, 0); |
Sean Christopherson | 0774a96 | 2020-03-20 13:55:40 -0700 | [diff] [blame] | 1265 | |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1266 | for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) { |
Igor Mammedov | 7f379cf | 2014-12-01 17:29:24 +0000 | [diff] [blame] | 1267 | mslots[i] = mslots[i + 1]; |
| 1268 | slots->id_to_index[mslots[i].id] = i; |
Igor Mammedov | 7f379cf | 2014-12-01 17:29:24 +0000 | [diff] [blame] | 1269 | } |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1270 | mslots[i] = *memslot; |
| 1271 | slots->id_to_index[memslot->id] = -1; |
| 1272 | } |
| 1273 | |
| 1274 | /* |
| 1275 | * "Insert" a new memslot by incrementing the number of used slots. Returns |
| 1276 | * the new slot's initial index into the memslots array. |
| 1277 | */ |
| 1278 | static inline int kvm_memslot_insert_back(struct kvm_memslots *slots) |
| 1279 | { |
| 1280 | return slots->used_slots++; |
| 1281 | } |
| 1282 | |
| 1283 | /* |
| 1284 | * Move a changed memslot backwards in the array by shifting existing slots |
| 1285 | * with a higher GFN toward the front of the array. Note, the changed memslot |
| 1286 | * itself is not preserved in the array, i.e. not swapped at this time, only |
| 1287 | * its new index into the array is tracked. Returns the changed memslot's |
| 1288 | * current index into the memslots array. |
| 1289 | */ |
| 1290 | static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, |
| 1291 | struct kvm_memory_slot *memslot) |
| 1292 | { |
| 1293 | struct kvm_memory_slot *mslots = slots->memslots; |
| 1294 | int i; |
| 1295 | |
| 1296 | if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) || |
| 1297 | WARN_ON_ONCE(!slots->used_slots)) |
| 1298 | return -1; |
Paolo Bonzini | efbeec7 | 2014-12-27 18:01:00 +0100 | [diff] [blame] | 1299 | |
| 1300 | /* |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1301 | * Move the target memslot backward in the array by shifting existing |
| 1302 | * memslots with a higher GFN (than the target memslot) towards the |
| 1303 | * front of the array. |
Paolo Bonzini | efbeec7 | 2014-12-27 18:01:00 +0100 | [diff] [blame] | 1304 | */ |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1305 | for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) { |
| 1306 | if (memslot->base_gfn > mslots[i + 1].base_gfn) |
| 1307 | break; |
Xiao Guangrong | f85e2cb | 2011-11-24 17:41:54 +0800 | [diff] [blame] | 1308 | |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1309 | WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn); |
| 1310 | |
| 1311 | /* Shift the next memslot forward one and update its index. */ |
| 1312 | mslots[i] = mslots[i + 1]; |
| 1313 | slots->id_to_index[mslots[i].id] = i; |
| 1314 | } |
| 1315 | return i; |
| 1316 | } |
| 1317 | |
| 1318 | /* |
| 1319 | * Move a changed memslot forwards in the array by shifting existing slots with |
| 1320 | * a lower GFN toward the back of the array. Note, the changed memslot itself |
| 1321 | * is not preserved in the array, i.e. not swapped at this time, only its new |
| 1322 | * index into the array is tracked. Returns the changed memslot's final index |
| 1323 | * into the memslots array. |
| 1324 | */ |
| 1325 | static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, |
| 1326 | struct kvm_memory_slot *memslot, |
| 1327 | int start) |
| 1328 | { |
| 1329 | struct kvm_memory_slot *mslots = slots->memslots; |
| 1330 | int i; |
| 1331 | |
| 1332 | for (i = start; i > 0; i--) { |
| 1333 | if (memslot->base_gfn < mslots[i - 1].base_gfn) |
| 1334 | break; |
| 1335 | |
| 1336 | WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn); |
| 1337 | |
| 1338 | /* Shift the next memslot back one and update its index. */ |
| 1339 | mslots[i] = mslots[i - 1]; |
| 1340 | slots->id_to_index[mslots[i].id] = i; |
| 1341 | } |
| 1342 | return i; |
| 1343 | } |
| 1344 | |
| 1345 | /* |
| 1346 | * Re-sort memslots based on their GFN to account for an added, deleted, or |
| 1347 | * moved memslot. Sorting memslots by GFN allows using a binary search during |
| 1348 | * memslot lookup. |
| 1349 | * |
| 1350 | * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! I.e. the entry |
| 1351 | * at memslots[0] has the highest GFN. |
| 1352 | * |
| 1353 | * The sorting algorithm takes advantage of having initially sorted memslots |
| 1354 | * and knowing the position of the changed memslot. Sorting is also optimized |
| 1355 | * by not swapping the updated memslot and instead only shifting other memslots |
| 1356 | * and tracking the new index for the updated memslot. Only once its final |
| 1357 | * index is known is the updated memslot copied into its position in the array. |
| 1358 | * |
| 1359 | * - When deleting a memslot, the deleted memslot simply needs to be moved to |
| 1360 | * the end of the array. |
| 1361 | * |
| 1362 | * - When creating a memslot, the algorithm "inserts" the new memslot at the |
| 1363 | * end of the array and then shifts it forward to its correct location. |
| 1364 | * |
| 1365 | * - When moving a memslot, the algorithm first moves the updated memslot |
| 1366 | * backward to handle the scenario where the memslot's GFN was changed to a |
| 1367 | * lower value. update_memslots() then falls through and runs the same flow |
| 1368 | * as creating a memslot to move the memslot forward to handle the scenario |
| 1369 | * where its GFN was changed to a higher value. |
| 1370 | * |
| 1371 | * Note, slots are sorted from highest->lowest instead of lowest->highest for |
| 1372 | * historical reasons. Originally, invalid memslots were denoted by having |
| 1373 | * GFN=0, thus sorting from highest->lowest naturally sorted invalid memslots |
| 1374 | * to the end of the array. The current algorithm uses dedicated logic to |
| 1375 | * delete a memslot and thus does not rely on invalid memslots having GFN=0. |
| 1376 | * |
| 1377 | * The other historical motivation for highest->lowest was to improve the |
| 1378 | * performance of memslot lookup. KVM originally used a linear search starting |
| 1379 | * at memslots[0]. On x86, the largest memslot usually has one of the highest, |
| 1380 | * if not *the* highest, GFN, as the bulk of the guest's RAM is located in a |
| 1381 | * single memslot above the 4gb boundary. As the largest memslot is also the |
| 1382 | * most likely to be referenced, sorting it to the front of the array was |
| 1383 | * advantageous. The current binary search starts from the middle of the array |
| 1384 | * and uses an LRU pointer to improve performance for all memslots and GFNs. |
| 1385 | */ |
| 1386 | static void update_memslots(struct kvm_memslots *slots, |
| 1387 | struct kvm_memory_slot *memslot, |
| 1388 | enum kvm_mr_change change) |
| 1389 | { |
| 1390 | int i; |
| 1391 | |
| 1392 | if (change == KVM_MR_DELETE) { |
| 1393 | kvm_memslot_delete(slots, memslot); |
| 1394 | } else { |
| 1395 | if (change == KVM_MR_CREATE) |
| 1396 | i = kvm_memslot_insert_back(slots); |
| 1397 | else |
| 1398 | i = kvm_memslot_move_backward(slots, memslot); |
| 1399 | i = kvm_memslot_move_forward(slots, memslot, i); |
| 1400 | |
| 1401 | /* |
| 1402 | * Copy the memslot to its new position in memslots and update |
| 1403 | * its index accordingly. |
| 1404 | */ |
| 1405 | slots->memslots[i] = *memslot; |
| 1406 | slots->id_to_index[memslot->id] = i; |
| 1407 | } |
Xiao Guangrong | bf3e05b | 2011-11-24 17:40:57 +0800 | [diff] [blame] | 1408 | } |
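/*
 * Worked example, illustrative only: with slots sorted from highest
 * base_gfn to lowest,
 *
 *	index:    0         1        2
 *	base_gfn: 0x100000  0x1000   0x0
 *
 * a KVM_MR_MOVE that changes the slot at index 2 to base_gfn 0x2000
 * makes kvm_memslot_move_backward() a no-op (nothing sorts below it),
 * then kvm_memslot_move_forward() shifts the 0x1000 slot down one
 * position and the updated slot is copied into index 1:
 *
 *	index:    0         1        2
 *	base_gfn: 0x100000  0x2000   0x1000
 */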
| 1409 | |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1410 | static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem) |
Xiao Guangrong | a50d64d | 2012-08-21 10:58:13 +0800 | [diff] [blame] | 1411 | { |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 1412 | u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; |
| 1413 | |
Christoffer Dall | 0f8a4de | 2014-08-26 14:00:37 +0200 | [diff] [blame] | 1414 | #ifdef __KVM_HAVE_READONLY_MEM |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 1415 | valid_flags |= KVM_MEM_READONLY; |
| 1416 | #endif |
| 1417 | |
| 1418 | if (mem->flags & ~valid_flags) |
Xiao Guangrong | a50d64d | 2012-08-21 10:58:13 +0800 | [diff] [blame] | 1419 | return -EINVAL; |
| 1420 | |
| 1421 | return 0; |
| 1422 | } |
| 1423 | |
Gleb Natapov | 7ec4fb4 | 2012-12-24 17:49:30 +0200 | [diff] [blame] | 1424 | static struct kvm_memslots *install_new_memslots(struct kvm *kvm, |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1425 | int as_id, struct kvm_memslots *slots) |
Gleb Natapov | 7ec4fb4 | 2012-12-24 17:49:30 +0200 | [diff] [blame] | 1426 | { |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1427 | struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); |
Sean Christopherson | 361209e | 2019-02-05 13:01:14 -0800 | [diff] [blame] | 1428 | u64 gen = old_memslots->generation; |
Gleb Natapov | 7ec4fb4 | 2012-12-24 17:49:30 +0200 | [diff] [blame] | 1429 | |
Sean Christopherson | 361209e | 2019-02-05 13:01:14 -0800 | [diff] [blame] | 1430 | WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); |
| 1431 | slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; |
David Matlack | ee3d157 | 2014-08-18 15:46:06 -0700 | [diff] [blame] | 1432 | |
Paolo Bonzini | 52ac8b3 | 2021-05-27 08:09:15 -0400 | [diff] [blame] | 1433 | /* |
| 1434 | * Do not store the new memslots while there are invalidations in |
Paolo Bonzini | 071064f | 2021-08-03 03:45:41 -0400 | [diff] [blame] | 1435 | * progress, otherwise the locking in invalidate_range_start and |
| 1436 | * invalidate_range_end will be unbalanced. |
Paolo Bonzini | 52ac8b3 | 2021-05-27 08:09:15 -0400 | [diff] [blame] | 1437 | */ |
| 1438 | spin_lock(&kvm->mn_invalidate_lock); |
| 1439 | prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait); |
| 1440 | while (kvm->mn_active_invalidate_count) { |
| 1441 | set_current_state(TASK_UNINTERRUPTIBLE); |
| 1442 | spin_unlock(&kvm->mn_invalidate_lock); |
| 1443 | schedule(); |
| 1444 | spin_lock(&kvm->mn_invalidate_lock); |
| 1445 | } |
| 1446 | finish_rcuwait(&kvm->mn_memslots_update_rcuwait); |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1447 | rcu_assign_pointer(kvm->memslots[as_id], slots); |
Paolo Bonzini | 52ac8b3 | 2021-05-27 08:09:15 -0400 | [diff] [blame] | 1448 | spin_unlock(&kvm->mn_invalidate_lock); |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1449 | |
| 1450 | /* |
| 1451 |  * slots_arch_lock was acquired in kvm_set_memslot.  It must be released |
| 1452 |  * before the synchronize_srcu_expedited() call below to avoid deadlock |
| 1453 |  * with another thread acquiring slots_arch_lock in an SRCU read-side critical section. |
| 1454 | */ |
| 1455 | mutex_unlock(&kvm->slots_arch_lock); |
| 1456 | |
Gleb Natapov | 7ec4fb4 | 2012-12-24 17:49:30 +0200 | [diff] [blame] | 1457 | synchronize_srcu_expedited(&kvm->srcu); |
Takuya Yoshikawa | e59dbe0 | 2013-07-04 13:40:29 +0900 | [diff] [blame] | 1458 | |
David Matlack | ee3d157 | 2014-08-18 15:46:06 -0700 | [diff] [blame] | 1459 | /* |
Sean Christopherson | 361209e | 2019-02-05 13:01:14 -0800 | [diff] [blame] | 1460 | * Increment the new memslot generation a second time, dropping the |
Miaohe Lin | 0011679 | 2019-12-11 14:26:23 +0800 | [diff] [blame] | 1461 | * update in-progress flag and incrementing the generation based on |
Sean Christopherson | 361209e | 2019-02-05 13:01:14 -0800 | [diff] [blame] | 1462 | * the number of address spaces. This provides a unique and easily |
| 1463 | * identifiable generation number while the memslots are in flux. |
| 1464 | */ |
| 1465 | gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; |
| 1466 | |
| 1467 | /* |
Paolo Bonzini | 4bd518f | 2017-02-03 20:44:51 -0800 | [diff] [blame] | 1468 | * Generations must be unique even across address spaces. We do not need |
| 1469 | * a global counter for that, instead the generation space is evenly split |
| 1470 | * across address spaces. For example, with two address spaces, address |
Sean Christopherson | 164bf7e | 2019-02-05 13:01:18 -0800 | [diff] [blame] | 1471 | * space 0 will use generations 0, 2, 4, ... while address space 1 will |
| 1472 | * use generations 1, 3, 5, ... |
David Matlack | ee3d157 | 2014-08-18 15:46:06 -0700 | [diff] [blame] | 1473 | */ |
Sean Christopherson | 164bf7e | 2019-02-05 13:01:18 -0800 | [diff] [blame] | 1474 | gen += KVM_ADDRESS_SPACE_NUM; |
David Matlack | ee3d157 | 2014-08-18 15:46:06 -0700 | [diff] [blame] | 1475 | |
Sean Christopherson | 1524825 | 2019-02-05 12:54:17 -0800 | [diff] [blame] | 1476 | kvm_arch_memslots_updated(kvm, gen); |
| 1477 | |
| 1478 | slots->generation = gen; |
Takuya Yoshikawa | e59dbe0 | 2013-07-04 13:40:29 +0900 | [diff] [blame] | 1479 | |
| 1480 | return old_memslots; |
Gleb Natapov | 7ec4fb4 | 2012-12-24 17:49:30 +0200 | [diff] [blame] | 1481 | } |
| 1482 | |
Ben Gardon | ddc12f2 | 2021-05-18 10:34:10 -0700 | [diff] [blame] | 1483 | static size_t kvm_memslots_size(int slots) |
| 1484 | { |
| 1485 | return sizeof(struct kvm_memslots) + |
| 1486 | (sizeof(struct kvm_memory_slot) * slots); |
| 1487 | } |
| 1488 | |
| 1489 | static void kvm_copy_memslots(struct kvm_memslots *to, |
| 1490 | struct kvm_memslots *from) |
| 1491 | { |
| 1492 | memcpy(to, from, kvm_memslots_size(from->used_slots)); |
| 1493 | } |
| 1494 | |
Sean Christopherson | 3694725 | 2020-02-18 13:07:32 -0800 | [diff] [blame] | 1495 | /* |
| 1496 | * Note, at a minimum, the current number of used slots must be allocated, even |
| 1497 | * when deleting a memslot, as we need a complete duplicate of the memslots for |
| 1498 | * use when invalidating a memslot prior to deleting/moving the memslot. |
| 1499 | */ |
| 1500 | static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, |
| 1501 | enum kvm_mr_change change) |
| 1502 | { |
| 1503 | struct kvm_memslots *slots; |
Ben Gardon | ddc12f2 | 2021-05-18 10:34:10 -0700 | [diff] [blame] | 1504 | size_t new_size; |
Sean Christopherson | 3694725 | 2020-02-18 13:07:32 -0800 | [diff] [blame] | 1505 | |
| 1506 | if (change == KVM_MR_CREATE) |
Ben Gardon | ddc12f2 | 2021-05-18 10:34:10 -0700 | [diff] [blame] | 1507 | new_size = kvm_memslots_size(old->used_slots + 1); |
Sean Christopherson | 3694725 | 2020-02-18 13:07:32 -0800 | [diff] [blame] | 1508 | else |
Ben Gardon | ddc12f2 | 2021-05-18 10:34:10 -0700 | [diff] [blame] | 1509 | new_size = kvm_memslots_size(old->used_slots); |
Sean Christopherson | 3694725 | 2020-02-18 13:07:32 -0800 | [diff] [blame] | 1510 | |
| 1511 | slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT); |
| 1512 | if (likely(slots)) |
Ben Gardon | ddc12f2 | 2021-05-18 10:34:10 -0700 | [diff] [blame] | 1513 | kvm_copy_memslots(slots, old); |
Sean Christopherson | 3694725 | 2020-02-18 13:07:32 -0800 | [diff] [blame] | 1514 | |
| 1515 | return slots; |
| 1516 | } |
| 1517 | |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1518 | static int kvm_set_memslot(struct kvm *kvm, |
| 1519 | const struct kvm_userspace_memory_region *mem, |
Sean Christopherson | 9d4c197 | 2020-02-18 13:07:24 -0800 | [diff] [blame] | 1520 | struct kvm_memory_slot *old, |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1521 | struct kvm_memory_slot *new, int as_id, |
| 1522 | enum kvm_mr_change change) |
| 1523 | { |
| 1524 | struct kvm_memory_slot *slot; |
| 1525 | struct kvm_memslots *slots; |
| 1526 | int r; |
| 1527 | |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1528 | /* |
| 1529 | * Released in install_new_memslots. |
| 1530 | * |
| 1531 | * Must be held from before the current memslots are copied until |
| 1532 | * after the new memslots are installed with rcu_assign_pointer, |
| 1533 | * then released before the synchronize srcu in install_new_memslots. |
| 1534 | * |
| 1535 | * When modifying memslots outside of the slots_lock, must be held |
| 1536 | * before reading the pointer to the current memslots until after all |
| 1537 | * changes to those memslots are complete. |
| 1538 | * |
| 1539 | * These rules ensure that installing new memslots does not lose |
| 1540 | * changes made to the previous memslots. |
| 1541 | */ |
| 1542 | mutex_lock(&kvm->slots_arch_lock); |
| 1543 | |
Sean Christopherson | 3694725 | 2020-02-18 13:07:32 -0800 | [diff] [blame] | 1544 | slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change); |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1545 | if (!slots) { |
| 1546 | mutex_unlock(&kvm->slots_arch_lock); |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1547 | return -ENOMEM; |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1548 | } |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1549 | |
| 1550 | if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { |
| 1551 | /* |
| 1552 | * Note, the INVALID flag needs to be in the appropriate entry |
| 1553 | * in the freshly allocated memslots, not in @old or @new. |
| 1554 | */ |
| 1555 | slot = id_to_memslot(slots, old->id); |
| 1556 | slot->flags |= KVM_MEMSLOT_INVALID; |
| 1557 | |
| 1558 | /* |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1559 | * We can re-use the memory from the old memslots. |
| 1560 | * It will be overwritten with a copy of the new memslots |
| 1561 | * after reacquiring the slots_arch_lock below. |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1562 | */ |
| 1563 | slots = install_new_memslots(kvm, as_id, slots); |
| 1564 | |
| 1565 | /* From this point no new shadow pages pointing to a deleted, |
| 1566 | * or moved, memslot will be created. |
| 1567 | * |
| 1568 | * validation of sp->gfn happens in: |
| 1569 | * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) |
| 1570 | * - kvm_is_visible_gfn (mmu_check_root) |
| 1571 | */ |
| 1572 | kvm_arch_flush_shadow_memslot(kvm, slot); |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1573 | |
| 1574 | /* Released in install_new_memslots. */ |
| 1575 | mutex_lock(&kvm->slots_arch_lock); |
| 1576 | |
| 1577 | /* |
| 1578 | * The arch-specific fields of the memslots could have changed |
| 1579 | * between releasing the slots_arch_lock in |
| 1580 | * install_new_memslots and here, so get a fresh copy of the |
| 1581 | * slots. |
| 1582 | */ |
| 1583 | kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id)); |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1584 | } |
| 1585 | |
| 1586 | r = kvm_arch_prepare_memory_region(kvm, new, mem, change); |
| 1587 | if (r) |
| 1588 | goto out_slots; |
| 1589 | |
| 1590 | update_memslots(slots, new, change); |
| 1591 | slots = install_new_memslots(kvm, as_id, slots); |
| 1592 | |
| 1593 | kvm_arch_commit_memory_region(kvm, mem, old, new, change); |
| 1594 | |
| 1595 | kvfree(slots); |
| 1596 | return 0; |
| 1597 | |
| 1598 | out_slots: |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1599 | if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { |
| 1600 | slot = id_to_memslot(slots, old->id); |
| 1601 | slot->flags &= ~KVM_MEMSLOT_INVALID; |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1602 | slots = install_new_memslots(kvm, as_id, slots); |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1603 | } else { |
| 1604 | mutex_unlock(&kvm->slots_arch_lock); |
| 1605 | } |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1606 | kvfree(slots); |
| 1607 | return r; |
| 1608 | } |
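/*
 * Illustrative summary, not from the original source, of the DELETE/MOVE
 * path through kvm_set_memslot() above:
 *
 *	mutex_lock(&kvm->slots_arch_lock)
 *	kvm_dup_memslots()                  copy the current memslots
 *	mark the old slot KVM_MEMSLOT_INVALID
 *	install_new_memslots()              releases slots_arch_lock
 *	kvm_arch_flush_shadow_memslot()
 *	mutex_lock(&kvm->slots_arch_lock)   reacquire
 *	kvm_copy_memslots()                 arch fields may have changed
 *	kvm_arch_prepare_memory_region()
 *	update_memslots() + install_new_memslots()   releases the lock again
 */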
| 1609 | |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1610 | static int kvm_delete_memslot(struct kvm *kvm, |
| 1611 | const struct kvm_userspace_memory_region *mem, |
| 1612 | struct kvm_memory_slot *old, int as_id) |
| 1613 | { |
| 1614 | struct kvm_memory_slot new; |
| 1615 | int r; |
| 1616 | |
| 1617 | if (!old->npages) |
| 1618 | return -EINVAL; |
| 1619 | |
| 1620 | memset(&new, 0, sizeof(new)); |
| 1621 | new.id = old->id; |
Peter Xu | 9e9eb22 | 2020-10-14 11:26:46 -0700 | [diff] [blame] | 1622 | /* |
| 1623 |  * This is only for debugging purposes; it should never be referenced |
| 1624 | * for a removed memslot. |
| 1625 | */ |
| 1626 | new.as_id = as_id; |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1627 | |
| 1628 | r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE); |
| 1629 | if (r) |
| 1630 | return r; |
| 1631 | |
Sean Christopherson | e96c81e | 2020-02-18 13:07:27 -0800 | [diff] [blame] | 1632 | kvm_free_memslot(kvm, old); |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1633 | return 0; |
| 1634 | } |
| 1635 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1636 | /* |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1637 | * Allocate some memory and give it an address in the guest physical address |
| 1638 | * space. |
| 1639 | * |
| 1640 | * Discontiguous memory is allowed, mostly for framebuffers. |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1641 | * |
Dominik Dingel | 02d5d55 | 2014-10-27 16:22:56 +0100 | [diff] [blame] | 1642 | * Must be called holding kvm->slots_lock for write. |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1643 | */ |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1644 | int __kvm_set_memory_region(struct kvm *kvm, |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1645 | const struct kvm_userspace_memory_region *mem) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1646 | { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1647 | struct kvm_memory_slot old, new; |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1648 | struct kvm_memory_slot *tmp; |
Takuya Yoshikawa | f64c039 | 2013-01-29 11:00:07 +0900 | [diff] [blame] | 1649 | enum kvm_mr_change change; |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1650 | int as_id, id; |
| 1651 | int r; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1652 | |
Xiao Guangrong | a50d64d | 2012-08-21 10:58:13 +0800 | [diff] [blame] | 1653 | r = check_memory_region_flags(mem); |
| 1654 | if (r) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1655 | return r; |
Xiao Guangrong | a50d64d | 2012-08-21 10:58:13 +0800 | [diff] [blame] | 1656 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1657 | as_id = mem->slot >> 16; |
| 1658 | id = (u16)mem->slot; |
| 1659 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1660 | /* General sanity checks */ |
| 1661 | if (mem->memory_size & (PAGE_SIZE - 1)) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1662 | return -EINVAL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1663 | if (mem->guest_phys_addr & (PAGE_SIZE - 1)) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1664 | return -EINVAL; |
Takuya Yoshikawa | fa3d315 | 2011-05-07 16:35:38 +0900 | [diff] [blame] | 1665 | /* We can read the guest memory with __xxx_user() later on. */ |
Paolo Bonzini | 09d952c | 2020-06-01 04:17:45 -0400 | [diff] [blame] | 1666 | if ((mem->userspace_addr & (PAGE_SIZE - 1)) || |
Marc Zyngier | 139bc8a | 2021-01-21 12:08:15 +0000 | [diff] [blame] | 1667 | (mem->userspace_addr != untagged_addr(mem->userspace_addr)) || |
Linus Torvalds | 96d4f26 | 2019-01-03 18:57:57 -0800 | [diff] [blame] | 1668 | !access_ok((void __user *)(unsigned long)mem->userspace_addr, |
Paolo Bonzini | 09d952c | 2020-06-01 04:17:45 -0400 | [diff] [blame] | 1669 | mem->memory_size)) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1670 | return -EINVAL; |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1671 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1672 | return -EINVAL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1673 | if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1674 | return -EINVAL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1675 | |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1676 | /* |
| 1677 |  * Make a full copy of the old memslot; the pointer will become stale |
| 1678 | * when the memslots are re-sorted by update_memslots(), and the old |
| 1679 | * memslot needs to be referenced after calling update_memslots(), e.g. |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1680 | * to free its resources and for arch specific behavior. |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1681 | */ |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1682 | tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id); |
| 1683 | if (tmp) { |
| 1684 | old = *tmp; |
| 1685 | tmp = NULL; |
| 1686 | } else { |
| 1687 | memset(&old, 0, sizeof(old)); |
| 1688 | old.id = id; |
| 1689 | } |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1690 | |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1691 | if (!mem->memory_size) |
| 1692 | return kvm_delete_memslot(kvm, mem, &old, as_id); |
| 1693 | |
Peter Xu | 9e9eb22 | 2020-10-14 11:26:46 -0700 | [diff] [blame] | 1694 | new.as_id = as_id; |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1695 | new.id = id; |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1696 | new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; |
| 1697 | new.npages = mem->memory_size >> PAGE_SHIFT; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1698 | new.flags = mem->flags; |
Sean Christopherson | 414de7ab | 2020-02-18 13:07:20 -0800 | [diff] [blame] | 1699 | new.userspace_addr = mem->userspace_addr; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1700 | |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1701 | if (new.npages > KVM_MEM_MAX_NR_PAGES) |
| 1702 | return -EINVAL; |
| 1703 | |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1704 | if (!old.npages) { |
| 1705 | change = KVM_MR_CREATE; |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1706 | new.dirty_bitmap = NULL; |
| 1707 | memset(&new.arch, 0, sizeof(new.arch)); |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1708 | } else { /* Modify an existing slot. */ |
| 1709 | if ((new.userspace_addr != old.userspace_addr) || |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1710 | (new.npages != old.npages) || |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1711 | ((new.flags ^ old.flags) & KVM_MEM_READONLY)) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1712 | return -EINVAL; |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1713 | |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1714 | if (new.base_gfn != old.base_gfn) |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1715 | change = KVM_MR_MOVE; |
| 1716 | else if (new.flags != old.flags) |
| 1717 | change = KVM_MR_FLAGS_ONLY; |
| 1718 | else /* Nothing to change. */ |
| 1719 | return 0; |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1720 | |
| 1721 | /* Copy dirty_bitmap and arch from the current memslot. */ |
| 1722 | new.dirty_bitmap = old.dirty_bitmap; |
| 1723 | memcpy(&new.arch, &old.arch, sizeof(new.arch)); |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1724 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1725 | |
Takuya Yoshikawa | f64c039 | 2013-01-29 11:00:07 +0900 | [diff] [blame] | 1726 | if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { |
Takuya Yoshikawa | 0a706be | 2013-01-11 18:26:55 +0900 | [diff] [blame] | 1727 | /* Check for overlaps */ |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1728 | kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) { |
| 1729 | if (tmp->id == id) |
Takuya Yoshikawa | 0a706be | 2013-01-11 18:26:55 +0900 | [diff] [blame] | 1730 | continue; |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1731 | if (!((new.base_gfn + new.npages <= tmp->base_gfn) || |
| 1732 | (new.base_gfn >= tmp->base_gfn + tmp->npages))) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1733 | return -EEXIST; |
Takuya Yoshikawa | 0a706be | 2013-01-11 18:26:55 +0900 | [diff] [blame] | 1734 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1735 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1736 | |
Sean Christopherson | 414de7ab | 2020-02-18 13:07:20 -0800 | [diff] [blame] | 1737 | /* Allocate/free page dirty bitmap as needed */ |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1738 | if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) |
Al Viro | 8b6d44c | 2007-02-09 16:38:40 +0000 | [diff] [blame] | 1739 | new.dirty_bitmap = NULL; |
Peter Xu | 044c59c | 2020-09-30 21:22:26 -0400 | [diff] [blame] | 1740 | else if (!new.dirty_bitmap && !kvm->dirty_ring_size) { |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 1741 | r = kvm_alloc_dirty_bitmap(&new); |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1742 | if (r) |
| 1743 | return r; |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 1744 | |
| 1745 | if (kvm_dirty_log_manual_protect_and_init_set(kvm)) |
| 1746 | bitmap_set(new.dirty_bitmap, 0, new.npages); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1747 | } |
| 1748 | |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1749 | r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change); |
| 1750 | if (r) |
| 1751 | goto out_bitmap; |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 1752 | |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1753 | if (old.dirty_bitmap && !new.dirty_bitmap) |
| 1754 | kvm_destroy_dirty_bitmap(&old); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1755 | return 0; |
| 1756 | |
Sean Christopherson | bd0e96f | 2020-02-18 13:07:21 -0800 | [diff] [blame] | 1757 | out_bitmap: |
| 1758 | if (new.dirty_bitmap && !old.dirty_bitmap) |
| 1759 | kvm_destroy_dirty_bitmap(&new); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1760 | return r; |
Izik Eidus | 210c7c4 | 2007-10-24 23:52:57 +0200 | [diff] [blame] | 1761 | } |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1762 | EXPORT_SYMBOL_GPL(__kvm_set_memory_region); |
| 1763 | |
| 1764 | int kvm_set_memory_region(struct kvm *kvm, |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1765 | const struct kvm_userspace_memory_region *mem) |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1766 | { |
| 1767 | int r; |
| 1768 | |
Marcelo Tosatti | 79fac95 | 2009-12-23 14:35:26 -0200 | [diff] [blame] | 1769 | mutex_lock(&kvm->slots_lock); |
Takuya Yoshikawa | 47ae31e | 2013-02-27 19:43:00 +0900 | [diff] [blame] | 1770 | r = __kvm_set_memory_region(kvm, mem); |
Marcelo Tosatti | 79fac95 | 2009-12-23 14:35:26 -0200 | [diff] [blame] | 1771 | mutex_unlock(&kvm->slots_lock); |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1772 | return r; |
| 1773 | } |
Izik Eidus | 210c7c4 | 2007-10-24 23:52:57 +0200 | [diff] [blame] | 1774 | EXPORT_SYMBOL_GPL(kvm_set_memory_region); |
| 1775 | |
Stephen Hemminger | 7940876 | 2013-12-29 12:12:29 -0800 | [diff] [blame] | 1776 | static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, |
| 1777 | struct kvm_userspace_memory_region *mem) |
Izik Eidus | 210c7c4 | 2007-10-24 23:52:57 +0200 | [diff] [blame] | 1778 | { |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1779 | if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) |
Izik Eidus | e0d62c7 | 2007-10-24 23:57:46 +0200 | [diff] [blame] | 1780 | return -EINVAL; |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1781 | |
Takuya Yoshikawa | 47ae31e | 2013-02-27 19:43:00 +0900 | [diff] [blame] | 1782 | return kvm_set_memory_region(kvm, mem); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1783 | } |
| 1784 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1785 | #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1786 | /** |
| 1787 | * kvm_get_dirty_log - get a snapshot of dirty pages |
| 1788 | * @kvm: pointer to kvm instance |
| 1789 | * @log: slot id and address to which we copy the log |
| 1790 | * @is_dirty: set to '1' if any dirty pages were found |
| 1791 | * @memslot: set to the associated memslot, always valid on success |
| 1792 | */ |
| 1793 | int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, |
| 1794 | int *is_dirty, struct kvm_memory_slot **memslot) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1795 | { |
Paolo Bonzini | 9f6b802 | 2015-05-17 16:20:07 +0200 | [diff] [blame] | 1796 | struct kvm_memslots *slots; |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1797 | int i, as_id, id; |
Takuya Yoshikawa | 87bf6e7 | 2010-04-12 19:35:35 +0900 | [diff] [blame] | 1798 | unsigned long n; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1799 | unsigned long any = 0; |
| 1800 | |
Peter Xu | b2cc64c | 2020-09-30 21:22:24 -0400 | [diff] [blame] | 1801 | /* Dirty ring tracking is mutually exclusive with dirty log tracking */ |
| 1802 | if (kvm->dirty_ring_size) |
| 1803 | return -ENXIO; |
| 1804 | |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1805 | *memslot = NULL; |
| 1806 | *is_dirty = 0; |
| 1807 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1808 | as_id = log->slot >> 16; |
| 1809 | id = (u16)log->slot; |
| 1810 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1811 | return -EINVAL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1812 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1813 | slots = __kvm_memslots(kvm, as_id); |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1814 | *memslot = id_to_memslot(slots, id); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1815 | if (!(*memslot) || !(*memslot)->dirty_bitmap) |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1816 | return -ENOENT; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1817 | |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1818 | kvm_arch_sync_dirty_log(kvm, *memslot); |
| 1819 | |
| 1820 | n = kvm_dirty_bitmap_bytes(*memslot); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1821 | |
Uri Lublin | cd1a4a9 | 2007-02-22 16:43:09 +0200 | [diff] [blame] | 1822 | for (i = 0; !any && i < n/sizeof(long); ++i) |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1823 | any = (*memslot)->dirty_bitmap[i]; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1824 | |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1825 | if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n)) |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1826 | return -EFAULT; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1827 | |
Zhang Xiantao | 5bb064d | 2007-11-18 20:29:43 +0800 | [diff] [blame] | 1828 | if (any) |
| 1829 | *is_dirty = 1; |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1830 | return 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1831 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 1832 | EXPORT_SYMBOL_GPL(kvm_get_dirty_log); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1833 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1834 | #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1835 | /** |
Jiang Biao | b8b0022 | 2019-04-23 19:40:30 +0800 | [diff] [blame] | 1836 | * kvm_get_dirty_log_protect - get a snapshot of dirty pages |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1837 | * and reenable dirty page tracking for the corresponding pages. |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1838 | * @kvm: pointer to kvm instance |
| 1839 | * @log: slot id and address to which we copy the log |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1840 | * |
| 1841 | * Keep in mind that VCPU threads can write to the bitmap concurrently; |
| 1842 | * to avoid losing track of dirty pages we keep the following |
| 1843 | * order: |
| 1844 | * |
| 1845 | * 1. Take a snapshot of the bit and clear it if needed. |
| 1846 | * 2. Write protect the corresponding page. |
| 1847 | * 3. Copy the snapshot to the userspace. |
| 1848 | * 4. Upon return, the caller flushes TLBs if needed. |
| 1849 | * |
| 1850 | * Between 2 and 4, the guest may write to the page using the remaining TLB |
| 1851 | * entry. This is not a problem because the page is reported dirty using |
| 1852 | * the snapshot taken beforehand, and step 4 ensures that writes done after |
| 1853 | * exiting to userspace will be logged for the next call. |
| 1854 | * |
| 1855 | */ |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1856 | static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1857 | { |
Paolo Bonzini | 9f6b802 | 2015-05-17 16:20:07 +0200 | [diff] [blame] | 1858 | struct kvm_memslots *slots; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1859 | struct kvm_memory_slot *memslot; |
Markus Elfring | 58d6db3 | 2017-01-22 17:30:16 +0100 | [diff] [blame] | 1860 | int i, as_id, id; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1861 | unsigned long n; |
| 1862 | unsigned long *dirty_bitmap; |
| 1863 | unsigned long *dirty_bitmap_buffer; |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1864 | bool flush; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1865 | |
Peter Xu | b2cc64c | 2020-09-30 21:22:24 -0400 | [diff] [blame] | 1866 | /* Dirty ring tracking is mutually exclusive with dirty log tracking */ |
| 1867 | if (kvm->dirty_ring_size) |
| 1868 | return -ENXIO; |
| 1869 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1870 | as_id = log->slot >> 16; |
| 1871 | id = (u16)log->slot; |
| 1872 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) |
Markus Elfring | 58d6db3 | 2017-01-22 17:30:16 +0100 | [diff] [blame] | 1873 | return -EINVAL; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1874 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1875 | slots = __kvm_memslots(kvm, as_id); |
| 1876 | memslot = id_to_memslot(slots, id); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1877 | if (!memslot || !memslot->dirty_bitmap) |
| 1878 | return -ENOENT; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1879 | |
| 1880 | dirty_bitmap = memslot->dirty_bitmap; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1881 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1882 | kvm_arch_sync_dirty_log(kvm, memslot); |
| 1883 | |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1884 | n = kvm_dirty_bitmap_bytes(memslot); |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1885 | flush = false; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1886 | if (kvm->manual_dirty_log_protect) { |
| 1887 | /* |
| 1888 | * Unlike kvm_get_dirty_log, we never need a TLB flush here, |
| 1889 | * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There |
| 1890 | * is some code duplication between this function and |
| 1891 | * kvm_get_dirty_log, but hopefully all architectures will |
| 1892 | * transition to kvm_get_dirty_log_protect so that |
| 1893 | * kvm_get_dirty_log can be eliminated. |
| 1894 | */ |
| 1895 | dirty_bitmap_buffer = dirty_bitmap; |
| 1896 | } else { |
| 1897 | dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); |
| 1898 | memset(dirty_bitmap_buffer, 0, n); |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1899 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 1900 | KVM_MMU_LOCK(kvm); |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1901 | for (i = 0; i < n / sizeof(long); i++) { |
| 1902 | unsigned long mask; |
| 1903 | gfn_t offset; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1904 | |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1905 | if (!dirty_bitmap[i]) |
| 1906 | continue; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1907 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1908 | flush = true; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1909 | mask = xchg(&dirty_bitmap[i], 0); |
| 1910 | dirty_bitmap_buffer[i] = mask; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1911 | |
Lan Tianyu | a67794c | 2019-02-02 17:20:27 +0800 | [diff] [blame] | 1912 | offset = i * BITS_PER_LONG; |
| 1913 | kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, |
| 1914 | offset, mask); |
Takuya Yoshikawa | 58d2930 | 2015-03-17 16:19:58 +0900 | [diff] [blame] | 1915 | } |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 1916 | KVM_MMU_UNLOCK(kvm); |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1917 | } |
| 1918 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1919 | if (flush) |
| 1920 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
| 1921 | |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1922 | if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) |
Markus Elfring | 58d6db3 | 2017-01-22 17:30:16 +0100 | [diff] [blame] | 1923 | return -EFAULT; |
| 1924 | return 0; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1925 | } |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1926 | |
| 1927 | |
| 1928 | /** |
| 1929 | * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot |
| 1930 | * @kvm: kvm instance |
| 1931 | * @log: slot id and address to which we copy the log |
| 1932 | * |
| 1933 | * Steps 1-4 below provide a general overview of dirty page logging. See |
| 1934 | * kvm_get_dirty_log_protect() function description for additional details. |
| 1935 | * |
| 1936 | * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we |
| 1937 | * always flush the TLB (step 4) even if a previous step failed and the dirty |
| 1938 | * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging |
| 1939 | * API does not preclude subsequent dirty log reads from user space. Flushing |
| 1940 | * the TLB ensures writes will be marked dirty for the next log read. |
| 1941 | * |
| 1942 | * 1. Take a snapshot of the bit and clear it if needed. |
| 1943 | * 2. Write protect the corresponding page. |
| 1944 | * 3. Copy the snapshot to the userspace. |
| 1945 | * 4. Flush TLBs if needed. |
| 1946 | */ |
| 1947 | static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, |
| 1948 | struct kvm_dirty_log *log) |
| 1949 | { |
| 1950 | int r; |
| 1951 | |
| 1952 | mutex_lock(&kvm->slots_lock); |
| 1953 | |
| 1954 | r = kvm_get_dirty_log_protect(kvm, log); |
| 1955 | |
| 1956 | mutex_unlock(&kvm->slots_lock); |
| 1957 | return r; |
| 1958 | } |
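/*
 * Illustrative sketch, not kernel code: a hypothetical userspace caller of
 * the KVM_GET_DIRTY_LOG ioctl serviced by kvm_vm_ioctl_get_dirty_log()
 * above. vm_fd and bitmap are assumed to exist; the buffer must hold one
 * bit per page of the slot, rounded up to a whole number of longs (see
 * kvm_dirty_bitmap_bytes()).
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int fetch_dirty_log_example(int vm_fd, void *bitmap)
{
	struct kvm_dirty_log log = {
		.slot = 0,		/* (as_id << 16) | slot id */
		.dirty_bitmap = bitmap,
	};

	/* Bit N set => page (slot base gfn + N) written since the last call. */
	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}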
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1959 | |
| 1960 | /** |
| 1961 | * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap |
| 1962 | * and reenable dirty page tracking for the corresponding pages. |
| 1963 | * @kvm: pointer to kvm instance |
| 1964 | * @log: slot id and address from which to fetch the bitmap of dirty pages |
| 1965 | */ |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1966 | static int kvm_clear_dirty_log_protect(struct kvm *kvm, |
| 1967 | struct kvm_clear_dirty_log *log) |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1968 | { |
| 1969 | struct kvm_memslots *slots; |
| 1970 | struct kvm_memory_slot *memslot; |
Tomas Bortoli | 98938aa | 2019-01-02 18:29:37 +0100 | [diff] [blame] | 1971 | int as_id, id; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1972 | gfn_t offset; |
Tomas Bortoli | 98938aa | 2019-01-02 18:29:37 +0100 | [diff] [blame] | 1973 | unsigned long i, n; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1974 | unsigned long *dirty_bitmap; |
| 1975 | unsigned long *dirty_bitmap_buffer; |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1976 | bool flush; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1977 | |
Peter Xu | b2cc64c | 2020-09-30 21:22:24 -0400 | [diff] [blame] | 1978 | /* Dirty ring tracking is mutually exclusive with dirty log tracking */ |
| 1979 | if (kvm->dirty_ring_size) |
| 1980 | return -ENXIO; |
| 1981 | |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1982 | as_id = log->slot >> 16; |
| 1983 | id = (u16)log->slot; |
| 1984 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) |
| 1985 | return -EINVAL; |
| 1986 | |
Paolo Bonzini | 76d58e0 | 2019-04-17 15:28:44 +0200 | [diff] [blame] | 1987 | if (log->first_page & 63) |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1988 | return -EINVAL; |
| 1989 | |
| 1990 | slots = __kvm_memslots(kvm, as_id); |
| 1991 | memslot = id_to_memslot(slots, id); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1992 | if (!memslot || !memslot->dirty_bitmap) |
| 1993 | return -ENOENT; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1994 | |
| 1995 | dirty_bitmap = memslot->dirty_bitmap; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1996 | |
Peter Xu | 4ddc920 | 2019-05-08 17:15:45 +0800 | [diff] [blame] | 1997 | n = ALIGN(log->num_pages, BITS_PER_LONG) / 8; |
Tomas Bortoli | 98938aa | 2019-01-02 18:29:37 +0100 | [diff] [blame] | 1998 | |
| 1999 | if (log->first_page > memslot->npages || |
Paolo Bonzini | 76d58e0 | 2019-04-17 15:28:44 +0200 | [diff] [blame] | 2000 | log->num_pages > memslot->npages - log->first_page || |
| 2001 | (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) |
| 2002 | return -EINVAL; |
Tomas Bortoli | 98938aa | 2019-01-02 18:29:37 +0100 | [diff] [blame] | 2003 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2004 | kvm_arch_sync_dirty_log(kvm, memslot); |
| 2005 | |
| 2006 | flush = false; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2007 | dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); |
| 2008 | if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) |
| 2009 | return -EFAULT; |
| 2010 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 2011 | KVM_MMU_LOCK(kvm); |
Peter Xu | 53eac7a | 2019-05-08 17:15:46 +0800 | [diff] [blame] | 2012 | for (offset = log->first_page, i = offset / BITS_PER_LONG, |
| 2013 | n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2014 | i++, offset += BITS_PER_LONG) { |
| 2015 | unsigned long mask = *dirty_bitmap_buffer++; |
| 2016 | atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i]; |
| 2017 | if (!mask) |
| 2018 | continue; |
| 2019 | |
| 2020 | mask &= atomic_long_fetch_andnot(mask, p); |
| 2021 | |
| 2022 | /* |
| 2023 | * mask contains the bits that really have been cleared. This |
| 2024 | * never includes any bits beyond the length of the memslot (if |
| 2025 | * the length is not aligned to 64 pages), therefore it is not |
| 2026 | * a problem if userspace sets them in log->dirty_bitmap. |
| 2027 | */ |
| 2028 | if (mask) { |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2029 | flush = true; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2030 | kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, |
| 2031 | offset, mask); |
| 2032 | } |
| 2033 | } |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 2034 | KVM_MMU_UNLOCK(kvm); |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2035 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2036 | if (flush) |
| 2037 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
| 2038 | |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2039 | return 0; |
| 2040 | } |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2041 | |
| 2042 | static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, |
| 2043 | struct kvm_clear_dirty_log *log) |
| 2044 | { |
| 2045 | int r; |
| 2046 | |
| 2047 | mutex_lock(&kvm->slots_lock); |
| 2048 | |
| 2049 | r = kvm_clear_dirty_log_protect(kvm, log); |
| 2050 | |
| 2051 | mutex_unlock(&kvm->slots_lock); |
| 2052 | return r; |
| 2053 | } |
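/*
 * Illustrative sketch, not kernel code: with the manual-protect capability
 * (KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2) enabled, KVM_GET_DIRTY_LOG no longer
 * write-protects pages; a hypothetical userspace caller clears and
 * re-protects a 64-page-aligned range explicitly via KVM_CLEAR_DIRTY_LOG,
 * which lands in kvm_clear_dirty_log_protect() above. vm_fd, bitmap and the
 * range below are assumptions.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int clear_dirty_log_example(int vm_fd, void *bitmap)
{
	struct kvm_clear_dirty_log clear = {
		.slot = 0,
		.first_page = 0,	/* must be a multiple of 64 */
		.num_pages = 2048,	/* multiple of 64 unless it reaches slot end */
		.dirty_bitmap = bitmap,	/* bits to clear, from the GET step */
	};

	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
}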
| 2054 | #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2055 | |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2056 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) |
| 2057 | { |
| 2058 | return __gfn_to_memslot(kvm_memslots(kvm), gfn); |
| 2059 | } |
Avi Kivity | a1f4d395 | 2010-06-21 11:44:20 +0300 | [diff] [blame] | 2060 | EXPORT_SYMBOL_GPL(gfn_to_memslot); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2061 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2062 | struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 2063 | { |
David Matlack | fe22ed8 | 2021-08-04 22:28:40 +0000 | [diff] [blame] | 2064 | struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); |
| 2065 | struct kvm_memory_slot *slot; |
| 2066 | int slot_index; |
| 2067 | |
| 2068 | slot = try_get_memslot(slots, vcpu->last_used_slot, gfn); |
| 2069 | if (slot) |
| 2070 | return slot; |
| 2071 | |
| 2072 | /* |
| 2073 | * Fall back to searching all memslots. We purposely use |
| 2074 | * search_memslots() instead of __gfn_to_memslot() to avoid |
| 2075 | * thrashing the VM-wide last_used_index in kvm_memslots. |
| 2076 | */ |
| 2077 | slot = search_memslots(slots, gfn, &slot_index); |
| 2078 | if (slot) { |
| 2079 | vcpu->last_used_slot = slot_index; |
| 2080 | return slot; |
| 2081 | } |
| 2082 | |
| 2083 | return NULL; |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2084 | } |
Paolo Bonzini | e72436b | 2020-04-17 12:21:06 -0400 | [diff] [blame] | 2085 | EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot); |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2086 | |
Yaowei Bai | 33e9415 | 2015-11-14 11:21:06 +0800 | [diff] [blame] | 2087 | bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) |
Izik Eidus | e0d62c7 | 2007-10-24 23:57:46 +0200 | [diff] [blame] | 2088 | { |
Xiao Guangrong | bf3e05b | 2011-11-24 17:40:57 +0800 | [diff] [blame] | 2089 | struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); |
Izik Eidus | e0d62c7 | 2007-10-24 23:57:46 +0200 | [diff] [blame] | 2090 | |
Paolo Bonzini | c36b715 | 2020-04-16 09:48:07 -0400 | [diff] [blame] | 2091 | return kvm_is_visible_memslot(memslot); |
Izik Eidus | e0d62c7 | 2007-10-24 23:57:46 +0200 | [diff] [blame] | 2092 | } |
| 2093 | EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); |
| 2094 | |
Vitaly Kuznetsov | 995decb | 2020-07-08 16:00:23 +0200 | [diff] [blame] | 2095 | bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 2096 | { |
| 2097 | struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 2098 | |
| 2099 | return kvm_is_visible_memslot(memslot); |
| 2100 | } |
| 2101 | EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); |
| 2102 | |
Sean Christopherson | f9b84e1 | 2020-01-08 12:24:37 -0800 | [diff] [blame] | 2103 | unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) |
Joerg Roedel | 8f0b1ab | 2010-01-28 12:37:56 +0100 | [diff] [blame] | 2104 | { |
| 2105 | struct vm_area_struct *vma; |
| 2106 | unsigned long addr, size; |
| 2107 | |
| 2108 | size = PAGE_SIZE; |
| 2109 | |
Sean Christopherson | 42cde48 | 2020-01-08 12:24:38 -0800 | [diff] [blame] | 2110 | addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); |
Joerg Roedel | 8f0b1ab | 2010-01-28 12:37:56 +0100 | [diff] [blame] | 2111 | if (kvm_is_error_hva(addr)) |
| 2112 | return PAGE_SIZE; |
| 2113 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2114 | mmap_read_lock(current->mm); |
Joerg Roedel | 8f0b1ab | 2010-01-28 12:37:56 +0100 | [diff] [blame] | 2115 | vma = find_vma(current->mm, addr); |
| 2116 | if (!vma) |
| 2117 | goto out; |
| 2118 | |
| 2119 | size = vma_kernel_pagesize(vma); |
| 2120 | |
| 2121 | out: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2122 | mmap_read_unlock(current->mm); |
Joerg Roedel | 8f0b1ab | 2010-01-28 12:37:56 +0100 | [diff] [blame] | 2123 | |
| 2124 | return size; |
| 2125 | } |
| 2126 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2127 | static bool memslot_is_readonly(struct kvm_memory_slot *slot) |
| 2128 | { |
| 2129 | return slot->flags & KVM_MEM_READONLY; |
| 2130 | } |
| 2131 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2132 | static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2133 | gfn_t *nr_pages, bool write) |
Izik Eidus | 539cb66 | 2007-11-11 22:05:04 +0200 | [diff] [blame] | 2134 | { |
Marcelo Tosatti | bc6678a | 2009-12-23 14:35:21 -0200 | [diff] [blame] | 2135 | if (!slot || slot->flags & KVM_MEMSLOT_INVALID) |
Xiao Guangrong | ca3a490 | 2012-08-21 11:01:50 +0800 | [diff] [blame] | 2136 | return KVM_HVA_ERR_BAD; |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2137 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2138 | if (memslot_is_readonly(slot) && write) |
| 2139 | return KVM_HVA_ERR_RO_BAD; |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2140 | |
| 2141 | if (nr_pages) |
| 2142 | *nr_pages = slot->npages - (gfn - slot->base_gfn); |
| 2143 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2144 | return __gfn_to_hva_memslot(slot, gfn); |
Izik Eidus | 539cb66 | 2007-11-11 22:05:04 +0200 | [diff] [blame] | 2145 | } |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2146 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2147 | static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2148 | gfn_t *nr_pages) |
| 2149 | { |
| 2150 | return __gfn_to_hva_many(slot, gfn, nr_pages, true); |
| 2151 | } |
| 2152 | |
| 2153 | unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, |
Stephen Hemminger | 7940876 | 2013-12-29 12:12:29 -0800 | [diff] [blame] | 2154 | gfn_t gfn) |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2155 | { |
| 2156 | return gfn_to_hva_many(slot, gfn, NULL); |
| 2157 | } |
| 2158 | EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); |
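/*
 * For reference, a sketch of the arithmetic behind these lookups:
 * __gfn_to_hva_memslot() (defined in kvm_host.h) resolves a gfn to a host
 * virtual address by plain offsetting within the slot, roughly as below;
 * the gfn must already have been validated against the slot, as
 * __gfn_to_hva_many() does above.
 */
static inline unsigned long example_gfn_to_hva(struct kvm_memory_slot *slot,
					       gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}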
| 2159 | |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2160 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) |
| 2161 | { |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2162 | return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2163 | } |
Sheng Yang | 0d15029 | 2008-04-25 21:44:50 +0800 | [diff] [blame] | 2164 | EXPORT_SYMBOL_GPL(gfn_to_hva); |
Izik Eidus | 539cb66 | 2007-11-11 22:05:04 +0200 | [diff] [blame] | 2165 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2166 | unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 2167 | { |
| 2168 | return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); |
| 2169 | } |
| 2170 | EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); |
| 2171 | |
Xiao Guangrong | 86ab8cf | 2012-08-21 10:59:53 +0800 | [diff] [blame] | 2172 | /* |
Wei Yang | 970c0d4 | 2018-10-09 10:41:15 +0800 | [diff] [blame] | 2173 | * Return the hva of a @gfn and the R/W attribute if possible. |
| 2174 | * |
| 2175 | * @slot: the kvm_memory_slot which contains @gfn |
| 2176 | * @gfn: the gfn to be translated |
| 2177 | * @writable: used to return the read/write attribute of the @slot if the hva |
| 2178 | * is valid and @writable is not NULL |
Xiao Guangrong | 86ab8cf | 2012-08-21 10:59:53 +0800 | [diff] [blame] | 2179 | */ |
Christoffer Dall | 64d8312 | 2014-08-19 12:15:00 +0200 | [diff] [blame] | 2180 | unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, |
| 2181 | gfn_t gfn, bool *writable) |
Gleb Natapov | 8030089 | 2010-10-19 18:13:41 +0200 | [diff] [blame] | 2182 | { |
Gleb Natapov | a2ac07f | 2013-10-01 19:58:36 +0300 | [diff] [blame] | 2183 | unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); |
| 2184 | |
| 2185 | if (!kvm_is_error_hva(hva) && writable) |
Paolo Bonzini | ba6a354 | 2013-09-09 13:52:33 +0200 | [diff] [blame] | 2186 | *writable = !memslot_is_readonly(slot); |
| 2187 | |
Gleb Natapov | a2ac07f | 2013-10-01 19:58:36 +0300 | [diff] [blame] | 2188 | return hva; |
Xiao Guangrong | 86ab8cf | 2012-08-21 10:59:53 +0800 | [diff] [blame] | 2189 | } |
| 2190 | |
Christoffer Dall | 64d8312 | 2014-08-19 12:15:00 +0200 | [diff] [blame] | 2191 | unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) |
| 2192 | { |
| 2193 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); |
| 2194 | |
| 2195 | return gfn_to_hva_memslot_prot(slot, gfn, writable); |
| 2196 | } |
| 2197 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2198 | unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) |
| 2199 | { |
| 2200 | struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 2201 | |
| 2202 | return gfn_to_hva_memslot_prot(slot, gfn, writable); |
| 2203 | } |
| 2204 | |
Huang Ying | fafc3db | 2011-01-30 11:15:49 +0800 | [diff] [blame] | 2205 | static inline int check_user_page_hwpoison(unsigned long addr) |
| 2206 | { |
Lorenzo Stoakes | 0d73175 | 2016-10-24 10:57:25 +0100 | [diff] [blame] | 2207 | int rc, flags = FOLL_HWPOISON | FOLL_WRITE; |
Huang Ying | fafc3db | 2011-01-30 11:15:49 +0800 | [diff] [blame] | 2208 | |
Lorenzo Stoakes | 0d73175 | 2016-10-24 10:57:25 +0100 | [diff] [blame] | 2209 | rc = get_user_pages(addr, 1, flags, NULL, NULL); |
Huang Ying | fafc3db | 2011-01-30 11:15:49 +0800 | [diff] [blame] | 2210 | return rc == -EHWPOISON; |
| 2211 | } |
| 2212 | |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2213 | /* |
Paolo Bonzini | b9b33da | 2018-07-27 17:44:41 +0200 | [diff] [blame] | 2214 | * The fast path to get the writable pfn which will be stored in @pfn; |
| 2215 | * true indicates success, otherwise false is returned. It's also the |
Miaohe Lin | 311497e | 2019-12-11 14:26:25 +0800 | [diff] [blame] | 2216 | * only path that can run in atomic context. |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2217 | */ |
Paolo Bonzini | b9b33da | 2018-07-27 17:44:41 +0200 | [diff] [blame] | 2218 | static bool hva_to_pfn_fast(unsigned long addr, bool write_fault, |
| 2219 | bool *writable, kvm_pfn_t *pfn) |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2220 | { |
| 2221 | struct page *page[1]; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2222 | |
Xiao Guangrong | 12ce13f | 2012-08-21 11:00:49 +0800 | [diff] [blame] | 2223 | /* |
| 2224 | * Fast-pin a writable pfn only if it is a write fault request |
| 2225 | * or the caller allows mapping a writable pfn for a read fault |
| 2226 | * request. |
| 2227 | */ |
| 2228 | if (!(write_fault || writable)) |
| 2229 | return false; |
| 2230 | |
Souptick Joarder | dadbb61 | 2020-06-07 21:40:55 -0700 | [diff] [blame] | 2231 | if (get_user_page_fast_only(addr, FOLL_WRITE, page)) { |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2232 | *pfn = page_to_pfn(page[0]); |
| 2233 | |
| 2234 | if (writable) |
| 2235 | *writable = true; |
| 2236 | return true; |
| 2237 | } |
| 2238 | |
| 2239 | return false; |
| 2240 | } |
| 2241 | |
| 2242 | /* |
| 2243 | * The slow path to get the pfn of the specified host virtual address; |
| 2244 | * 1 indicates success, -errno is returned if an error is detected. |
| 2245 | */ |
| 2246 | static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2247 | bool *writable, kvm_pfn_t *pfn) |
Avi Kivity | 954bbbc | 2007-03-30 14:02:32 +0300 | [diff] [blame] | 2248 | { |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 2249 | unsigned int flags = FOLL_HWPOISON; |
| 2250 | struct page *page; |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2251 | int npages = 0; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2252 | |
| 2253 | might_sleep(); |
| 2254 | |
| 2255 | if (writable) |
| 2256 | *writable = write_fault; |
| 2257 | |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 2258 | if (write_fault) |
| 2259 | flags |= FOLL_WRITE; |
| 2260 | if (async) |
| 2261 | flags |= FOLL_NOWAIT; |
Lorenzo Stoakes | d4944b0 | 2016-10-13 01:20:12 +0100 | [diff] [blame] | 2262 | |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 2263 | npages = get_user_pages_unlocked(addr, 1, &page, flags); |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2264 | if (npages != 1) |
| 2265 | return npages; |
| 2266 | |
| 2267 | /* map read fault as writable if possible */ |
Xiao Guangrong | 12ce13f | 2012-08-21 11:00:49 +0800 | [diff] [blame] | 2268 | if (unlikely(!write_fault) && writable) { |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 2269 | struct page *wpage; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2270 | |
Souptick Joarder | dadbb61 | 2020-06-07 21:40:55 -0700 | [diff] [blame] | 2271 | if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) { |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2272 | *writable = true; |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 2273 | put_page(page); |
| 2274 | page = wpage; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2275 | } |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2276 | } |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 2277 | *pfn = page_to_pfn(page); |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2278 | return npages; |
| 2279 | } |
| 2280 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2281 | static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) |
| 2282 | { |
| 2283 | if (unlikely(!(vma->vm_flags & VM_READ))) |
| 2284 | return false; |
| 2285 | |
| 2286 | if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) |
| 2287 | return false; |
| 2288 | |
| 2289 | return true; |
| 2290 | } |
| 2291 | |
Nicholas Piggin | f8be156 | 2021-06-24 08:29:04 -0400 | [diff] [blame] | 2292 | static int kvm_try_get_pfn(kvm_pfn_t pfn) |
| 2293 | { |
| 2294 | if (kvm_is_reserved_pfn(pfn)) |
| 2295 | return 1; |
| 2296 | return get_page_unless_zero(pfn_to_page(pfn)); |
| 2297 | } |
| 2298 | |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 2299 | static int hva_to_pfn_remapped(struct vm_area_struct *vma, |
| 2300 | unsigned long addr, bool *async, |
KarimAllah Ahmed | a340b3e | 2018-01-17 19:18:56 +0100 | [diff] [blame] | 2301 | bool write_fault, bool *writable, |
| 2302 | kvm_pfn_t *p_pfn) |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 2303 | { |
Sean Christopherson | a954577 | 2021-02-08 12:19:40 -0800 | [diff] [blame] | 2304 | kvm_pfn_t pfn; |
Paolo Bonzini | bd2fae8 | 2021-02-01 05:12:11 -0500 | [diff] [blame] | 2305 | pte_t *ptep; |
| 2306 | spinlock_t *ptl; |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2307 | int r; |
| 2308 | |
Paolo Bonzini | 9fd6dad | 2021-02-05 05:07:11 -0500 | [diff] [blame] | 2309 | r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2310 | if (r) { |
| 2311 | /* |
| 2312 | * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does |
| 2313 | * not call the fault handler, so do it here. |
| 2314 | */ |
| 2315 | bool unlocked = false; |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 2316 | r = fixup_user_fault(current->mm, addr, |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2317 | (write_fault ? FAULT_FLAG_WRITE : 0), |
| 2318 | &unlocked); |
Paolo Bonzini | a8387d0 | 2020-05-29 05:42:55 -0400 | [diff] [blame] | 2319 | if (unlocked) |
| 2320 | return -EAGAIN; |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2321 | if (r) |
| 2322 | return r; |
| 2323 | |
Paolo Bonzini | 9fd6dad | 2021-02-05 05:07:11 -0500 | [diff] [blame] | 2324 | r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2325 | if (r) |
| 2326 | return r; |
Paolo Bonzini | bd2fae8 | 2021-02-01 05:12:11 -0500 | [diff] [blame] | 2327 | } |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2328 | |
Paolo Bonzini | bd2fae8 | 2021-02-01 05:12:11 -0500 | [diff] [blame] | 2329 | if (write_fault && !pte_write(*ptep)) { |
| 2330 | pfn = KVM_PFN_ERR_RO_FAULT; |
| 2331 | goto out; |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2332 | } |
| 2333 | |
KarimAllah Ahmed | a340b3e | 2018-01-17 19:18:56 +0100 | [diff] [blame] | 2334 | if (writable) |
Paolo Bonzini | bd2fae8 | 2021-02-01 05:12:11 -0500 | [diff] [blame] | 2335 | *writable = pte_write(*ptep); |
| 2336 | pfn = pte_pfn(*ptep); |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2337 | |
| 2338 | /* |
| 2339 | * Get a reference here because callers of *hva_to_pfn* and |
| 2340 | * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the |
| 2341 | * returned pfn. This is only needed if the VMA has VM_MIXEDMAP |
| 2342 | * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will |
| 2343 | * simply do nothing for reserved pfns. |
| 2344 | * |
| 2345 | * Whoever called remap_pfn_range is also going to call e.g. |
| 2346 | * unmap_mapping_range before the underlying pages are freed, |
| 2347 | * causing a call to our MMU notifier. |
Nicholas Piggin | f8be156 | 2021-06-24 08:29:04 -0400 | [diff] [blame] | 2348 | * |
| 2349 | * Certain IO or PFNMAP mappings can be backed with valid |
| 2350 | * struct pages, but be allocated without refcounting; e.g., |
| 2351 | * tail pages of non-compound higher order allocations, which |
| 2352 | * would then underflow the refcount when the caller does the |
| 2353 | * required put_page. Don't allow those pages here. |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2354 | */ |
Nicholas Piggin | f8be156 | 2021-06-24 08:29:04 -0400 | [diff] [blame] | 2355 | if (!kvm_try_get_pfn(pfn)) |
| 2356 | r = -EFAULT; |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2357 | |
Paolo Bonzini | bd2fae8 | 2021-02-01 05:12:11 -0500 | [diff] [blame] | 2358 | out: |
| 2359 | pte_unmap_unlock(ptep, ptl); |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2360 | *p_pfn = pfn; |
Nicholas Piggin | f8be156 | 2021-06-24 08:29:04 -0400 | [diff] [blame] | 2361 | |
| 2362 | return r; |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 2363 | } |
| 2364 | |
Xiao Guangrong | 12ce13f | 2012-08-21 11:00:49 +0800 | [diff] [blame] | 2365 | /* |
| 2366 | * Pin guest page in memory and return its pfn. |
| 2367 | * @addr: host virtual address which maps memory to the guest |
| 2368 | * @atomic: whether this function is called from atomic context (it must not sleep) |
| 2369 | * @async: whether this function needs to wait for IO to complete if the |
| 2370 | * host page is not in memory |
| 2371 | * @write_fault: whether we should get a writable host page |
| 2372 | * @writable: whether mapping a writable host page is allowed for !@write_fault |
| 2373 | * |
| 2374 | * The function will map a writable host page for these two cases: |
| 2375 | * 1): @write_fault = true |
| 2376 | * 2): @write_fault = false && @writable, @writable will tell the caller |
| 2377 | * whether the mapping is writable. |
| 2378 | */ |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2379 | static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2380 | bool write_fault, bool *writable) |
| 2381 | { |
| 2382 | struct vm_area_struct *vma; |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2383 | kvm_pfn_t pfn = 0; |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 2384 | int npages, r; |
Avi Kivity | 954bbbc | 2007-03-30 14:02:32 +0300 | [diff] [blame] | 2385 | |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2386 | /* we can do it either atomically or asynchronously, not both */ |
| 2387 | BUG_ON(atomic && async); |
| 2388 | |
Paolo Bonzini | b9b33da | 2018-07-27 17:44:41 +0200 | [diff] [blame] | 2389 | if (hva_to_pfn_fast(addr, write_fault, writable, &pfn)) |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2390 | return pfn; |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2391 | |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2392 | if (atomic) |
| 2393 | return KVM_PFN_ERR_FAULT; |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2394 | |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2395 | npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); |
| 2396 | if (npages == 1) |
| 2397 | return pfn; |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2398 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2399 | mmap_read_lock(current->mm); |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2400 | if (npages == -EHWPOISON || |
| 2401 | (!async && check_user_page_hwpoison(addr))) { |
| 2402 | pfn = KVM_PFN_ERR_HWPOISON; |
| 2403 | goto exit; |
Xiao Guangrong | 887c08a | 2010-08-22 19:10:28 +0800 | [diff] [blame] | 2404 | } |
Izik Eidus | 539cb66 | 2007-11-11 22:05:04 +0200 | [diff] [blame] | 2405 | |
Paolo Bonzini | a8387d0 | 2020-05-29 05:42:55 -0400 | [diff] [blame] | 2406 | retry: |
Liam Howlett | fc98c03 | 2021-06-28 19:39:17 -0700 | [diff] [blame] | 2407 | vma = vma_lookup(current->mm, addr); |
Anthony Liguori | 8d4e128 | 2007-10-18 09:59:34 -0500 | [diff] [blame] | 2408 | |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2409 | if (vma == NULL) |
| 2410 | pfn = KVM_PFN_ERR_FAULT; |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 2411 | else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { |
KarimAllah Ahmed | a340b3e | 2018-01-17 19:18:56 +0100 | [diff] [blame] | 2412 | r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn); |
Paolo Bonzini | a8387d0 | 2020-05-29 05:42:55 -0400 | [diff] [blame] | 2413 | if (r == -EAGAIN) |
| 2414 | goto retry; |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 2415 | if (r < 0) |
| 2416 | pfn = KVM_PFN_ERR_FAULT; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2417 | } else { |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2418 | if (async && vma_is_valid(vma, write_fault)) |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2419 | *async = true; |
| 2420 | pfn = KVM_PFN_ERR_FAULT; |
| 2421 | } |
| 2422 | exit: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2423 | mmap_read_unlock(current->mm); |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2424 | return pfn; |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2425 | } |
| 2426 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2427 | kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2428 | bool atomic, bool *async, bool write_fault, |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 2429 | bool *writable, hva_t *hva) |
Xiao Guangrong | 887c08a | 2010-08-22 19:10:28 +0800 | [diff] [blame] | 2430 | { |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2431 | unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); |
| 2432 | |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 2433 | if (hva) |
| 2434 | *hva = addr; |
| 2435 | |
Paolo Bonzini | b2740d3 | 2016-02-23 15:36:01 +0100 | [diff] [blame] | 2436 | if (addr == KVM_HVA_ERR_RO_BAD) { |
| 2437 | if (writable) |
| 2438 | *writable = false; |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2439 | return KVM_PFN_ERR_RO_FAULT; |
Paolo Bonzini | b2740d3 | 2016-02-23 15:36:01 +0100 | [diff] [blame] | 2440 | } |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2441 | |
Paolo Bonzini | b2740d3 | 2016-02-23 15:36:01 +0100 | [diff] [blame] | 2442 | if (kvm_is_error_hva(addr)) { |
| 2443 | if (writable) |
| 2444 | *writable = false; |
Xiao Guangrong | 81c52c5 | 2012-10-16 20:10:59 +0800 | [diff] [blame] | 2445 | return KVM_PFN_NOSLOT; |
Paolo Bonzini | b2740d3 | 2016-02-23 15:36:01 +0100 | [diff] [blame] | 2446 | } |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2447 | |
| 2448 | /* Do not map writable pfn in the readonly memslot. */ |
| 2449 | if (writable && memslot_is_readonly(slot)) { |
| 2450 | *writable = false; |
| 2451 | writable = NULL; |
| 2452 | } |
| 2453 | |
| 2454 | return hva_to_pfn(addr, atomic, async, write_fault, |
| 2455 | writable); |
Xiao Guangrong | 887c08a | 2010-08-22 19:10:28 +0800 | [diff] [blame] | 2456 | } |
Paolo Bonzini | 3520469 | 2015-04-02 11:20:48 +0200 | [diff] [blame] | 2457 | EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); |
Xiao Guangrong | 887c08a | 2010-08-22 19:10:28 +0800 | [diff] [blame] | 2458 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2459 | kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2460 | bool *writable) |
| 2461 | { |
Paolo Bonzini | e37afc6 | 2015-05-19 16:09:04 +0200 | [diff] [blame] | 2462 | return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 2463 | write_fault, writable, NULL); |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2464 | } |
| 2465 | EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); |
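/*
 * Illustrative sketch, a hypothetical caller rather than kernel code:
 * handling a read fault while still asking for a writable pfn when
 * possible, i.e. case 2) in the comment above hva_to_pfn().
 */
static kvm_pfn_t example_read_fault(struct kvm *kvm, gfn_t gfn)
{
	bool writable;
	kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, false, &writable);

	if (is_error_noslot_pfn(pfn))
		return pfn;

	/*
	 * If writable is true, the host page was mapped writable even though
	 * this was a read fault, so a later guest write needs no refault.
	 */
	return pfn;
}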
| 2466 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2467 | kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) |
Marcelo Tosatti | 506f0d6 | 2009-12-23 14:35:19 -0200 | [diff] [blame] | 2468 | { |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 2469 | return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL); |
Marcelo Tosatti | 506f0d6 | 2009-12-23 14:35:19 -0200 | [diff] [blame] | 2470 | } |
Paolo Bonzini | e37afc6 | 2015-05-19 16:09:04 +0200 | [diff] [blame] | 2471 | EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); |
Marcelo Tosatti | 506f0d6 | 2009-12-23 14:35:19 -0200 | [diff] [blame] | 2472 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2473 | kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) |
Xiao Guangrong | 037d92d | 2012-08-21 10:59:12 +0800 | [diff] [blame] | 2474 | { |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 2475 | return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL); |
Xiao Guangrong | 037d92d | 2012-08-21 10:59:12 +0800 | [diff] [blame] | 2476 | } |
| 2477 | EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); |
| 2478 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2479 | kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2480 | { |
| 2481 | return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); |
| 2482 | } |
| 2483 | EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); |
| 2484 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2485 | kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) |
Paolo Bonzini | e37afc6 | 2015-05-19 16:09:04 +0200 | [diff] [blame] | 2486 | { |
| 2487 | return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); |
| 2488 | } |
| 2489 | EXPORT_SYMBOL_GPL(gfn_to_pfn); |
| 2490 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2491 | kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2492 | { |
| 2493 | return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); |
| 2494 | } |
| 2495 | EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); |
| 2496 | |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 2497 | int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2498 | struct page **pages, int nr_pages) |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2499 | { |
| 2500 | unsigned long addr; |
Arnd Bergmann | 076b925 | 2017-08-10 14:14:39 +0200 | [diff] [blame] | 2501 | gfn_t entry = 0; |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2502 | |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 2503 | addr = gfn_to_hva_many(slot, gfn, &entry); |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2504 | if (kvm_is_error_hva(addr)) |
| 2505 | return -1; |
| 2506 | |
| 2507 | if (entry < nr_pages) |
| 2508 | return 0; |
| 2509 | |
Souptick Joarder | dadbb61 | 2020-06-07 21:40:55 -0700 | [diff] [blame] | 2510 | return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2511 | } |
| 2512 | EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); |
| 2513 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2514 | static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2515 | { |
Xiao Guangrong | 81c52c5 | 2012-10-16 20:10:59 +0800 | [diff] [blame] | 2516 | if (is_error_noslot_pfn(pfn)) |
Xiao Guangrong | 6cede2e | 2012-08-03 15:41:22 +0800 | [diff] [blame] | 2517 | return KVM_ERR_PTR_BAD_PAGE; |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2518 | |
Ard Biesheuvel | bf4bea8 | 2014-11-10 08:33:56 +0000 | [diff] [blame] | 2519 | if (kvm_is_reserved_pfn(pfn)) { |
Xiao Guangrong | cb9aaa3 | 2012-08-03 15:42:10 +0800 | [diff] [blame] | 2520 | WARN_ON(1); |
| 2521 | return KVM_ERR_PTR_BAD_PAGE; |
| 2522 | } |
| 2523 | |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2524 | return pfn_to_page(pfn); |
| 2525 | } |
| 2526 | |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2527 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) |
| 2528 | { |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2529 | kvm_pfn_t pfn; |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2530 | |
| 2531 | pfn = gfn_to_pfn(kvm, gfn); |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2532 | |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2533 | return kvm_pfn_to_page(pfn); |
Avi Kivity | 954bbbc | 2007-03-30 14:02:32 +0300 | [diff] [blame] | 2534 | } |
| 2535 | EXPORT_SYMBOL_GPL(gfn_to_page); |
| 2536 | |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2537 | void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache) |
| 2538 | { |
| 2539 | if (pfn == 0) |
| 2540 | return; |
| 2541 | |
| 2542 | if (cache) |
| 2543 | cache->pfn = cache->gfn = 0; |
| 2544 | |
| 2545 | if (dirty) |
| 2546 | kvm_release_pfn_dirty(pfn); |
| 2547 | else |
| 2548 | kvm_release_pfn_clean(pfn); |
| 2549 | } |
| 2550 | |
| 2551 | static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2552 | struct gfn_to_pfn_cache *cache, u64 gen) |
| 2553 | { |
| 2554 | kvm_release_pfn(cache->pfn, cache->dirty, cache); |
| 2555 | |
| 2556 | cache->pfn = gfn_to_pfn_memslot(slot, gfn); |
| 2557 | cache->gfn = gfn; |
| 2558 | cache->dirty = false; |
| 2559 | cache->generation = gen; |
| 2560 | } |
| 2561 | |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2562 | static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn, |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2563 | struct kvm_host_map *map, |
| 2564 | struct gfn_to_pfn_cache *cache, |
| 2565 | bool atomic) |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2566 | { |
| 2567 | kvm_pfn_t pfn; |
| 2568 | void *hva = NULL; |
| 2569 | struct page *page = KVM_UNMAPPED_PAGE; |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2570 | struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn); |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2571 | u64 gen = slots->generation; |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2572 | |
| 2573 | if (!map) |
| 2574 | return -EINVAL; |
| 2575 | |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2576 | if (cache) { |
| 2577 | if (!cache->pfn || cache->gfn != gfn || |
| 2578 | cache->generation != gen) { |
| 2579 | if (atomic) |
| 2580 | return -EAGAIN; |
| 2581 | kvm_cache_gfn_to_pfn(slot, gfn, cache, gen); |
| 2582 | } |
| 2583 | pfn = cache->pfn; |
| 2584 | } else { |
| 2585 | if (atomic) |
| 2586 | return -EAGAIN; |
| 2587 | pfn = gfn_to_pfn_memslot(slot, gfn); |
| 2588 | } |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2589 | if (is_error_noslot_pfn(pfn)) |
| 2590 | return -EINVAL; |
| 2591 | |
| 2592 | if (pfn_valid(pfn)) { |
| 2593 | page = pfn_to_page(pfn); |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2594 | if (atomic) |
| 2595 | hva = kmap_atomic(page); |
| 2596 | else |
| 2597 | hva = kmap(page); |
Paolo Bonzini | d30b214 | 2019-05-20 12:06:36 +0200 | [diff] [blame] | 2598 | #ifdef CONFIG_HAS_IOMEM |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2599 | } else if (!atomic) { |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2600 | hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2601 | } else { |
| 2602 | return -EINVAL; |
Paolo Bonzini | d30b214 | 2019-05-20 12:06:36 +0200 | [diff] [blame] | 2603 | #endif |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2604 | } |
| 2605 | |
| 2606 | if (!hva) |
| 2607 | return -EFAULT; |
| 2608 | |
| 2609 | map->page = page; |
| 2610 | map->hva = hva; |
| 2611 | map->pfn = pfn; |
| 2612 | map->gfn = gfn; |
| 2613 | |
| 2614 | return 0; |
| 2615 | } |
| 2616 | |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2617 | int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, |
| 2618 | struct gfn_to_pfn_cache *cache, bool atomic) |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2619 | { |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2620 | return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map, |
| 2621 | cache, atomic); |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2622 | } |
| 2623 | EXPORT_SYMBOL_GPL(kvm_map_gfn); |
| 2624 | |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2625 | int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) |
| 2626 | { |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2627 | return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map, |
| 2628 | NULL, false); |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2629 | } |
| 2630 | EXPORT_SYMBOL_GPL(kvm_vcpu_map); |
| 2631 | |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2632 | static void __kvm_unmap_gfn(struct kvm *kvm, |
| 2633 | struct kvm_memory_slot *memslot, |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2634 | struct kvm_host_map *map, |
| 2635 | struct gfn_to_pfn_cache *cache, |
| 2636 | bool dirty, bool atomic) |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2637 | { |
| 2638 | if (!map) |
| 2639 | return; |
| 2640 | |
| 2641 | if (!map->hva) |
| 2642 | return; |
| 2643 | |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2644 | if (map->page != KVM_UNMAPPED_PAGE) { |
| 2645 | if (atomic) |
| 2646 | kunmap_atomic(map->hva); |
| 2647 | else |
| 2648 | kunmap(map->page); |
| 2649 | } |
Christian Borntraeger | eb1f2f3 | 2019-05-27 10:28:25 +0200 | [diff] [blame] | 2650 | #ifdef CONFIG_HAS_IOMEM |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2651 | else if (!atomic) |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2652 | memunmap(map->hva); |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2653 | else |
| 2654 | WARN_ONCE(1, "Unexpected unmapping in atomic context"); |
Christian Borntraeger | eb1f2f3 | 2019-05-27 10:28:25 +0200 | [diff] [blame] | 2655 | #endif |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2656 | |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2657 | if (dirty) |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2658 | mark_page_dirty_in_slot(kvm, memslot, map->gfn); |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2659 | |
| 2660 | if (cache) |
| 2661 | cache->dirty |= dirty; |
| 2662 | else |
| 2663 | kvm_release_pfn(map->pfn, dirty, NULL); |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2664 | |
| 2665 | map->hva = NULL; |
| 2666 | map->page = NULL; |
| 2667 | } |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2668 | |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2669 | int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, |
| 2670 | struct gfn_to_pfn_cache *cache, bool dirty, bool atomic) |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2671 | { |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2672 | __kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map, |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2673 | cache, dirty, atomic); |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2674 | return 0; |
| 2675 | } |
| 2676 | EXPORT_SYMBOL_GPL(kvm_unmap_gfn); |
| 2677 | |
| 2678 | void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) |
| 2679 | { |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2680 | __kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), |
| 2681 | map, NULL, dirty, false); |
Boris Ostrovsky | 1eff70a | 2019-11-12 16:35:06 +0000 | [diff] [blame] | 2682 | } |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2683 | EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); |
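/*
 * Illustrative sketch, a hypothetical helper rather than kernel code: the
 * intended pairing of kvm_vcpu_map()/kvm_vcpu_unmap() for a short-lived
 * kernel mapping of a guest page; len <= PAGE_SIZE is assumed.
 */
static int example_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
				    const void *data, unsigned int len)
{
	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gfn, &map))
		return -EFAULT;

	memcpy(map.hva, data, len);

	/* dirty=true marks the page in the dirty log and releases the pfn. */
	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}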
| 2684 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2685 | struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 2686 | { |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2687 | kvm_pfn_t pfn; |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2688 | |
| 2689 | pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); |
| 2690 | |
| 2691 | return kvm_pfn_to_page(pfn); |
| 2692 | } |
| 2693 | EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); |
| 2694 | |
Izik Eidus | b4231d6 | 2007-11-20 11:49:33 +0200 | [diff] [blame] | 2695 | void kvm_release_page_clean(struct page *page) |
| 2696 | { |
Xiao Guangrong | 32cad84 | 2012-08-03 15:42:52 +0800 | [diff] [blame] | 2697 | WARN_ON(is_error_page(page)); |
| 2698 | |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2699 | kvm_release_pfn_clean(page_to_pfn(page)); |
Izik Eidus | b4231d6 | 2007-11-20 11:49:33 +0200 | [diff] [blame] | 2700 | } |
| 2701 | EXPORT_SYMBOL_GPL(kvm_release_page_clean); |
| 2702 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2703 | void kvm_release_pfn_clean(kvm_pfn_t pfn) |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2704 | { |
Ard Biesheuvel | bf4bea8 | 2014-11-10 08:33:56 +0000 | [diff] [blame] | 2705 | if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2706 | put_page(pfn_to_page(pfn)); |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2707 | } |
| 2708 | EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); |
| 2709 | |
Izik Eidus | b4231d6 | 2007-11-20 11:49:33 +0200 | [diff] [blame] | 2710 | void kvm_release_page_dirty(struct page *page) |
Izik Eidus | 8a7ae05 | 2007-10-18 11:09:33 +0200 | [diff] [blame] | 2711 | { |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2712 | WARN_ON(is_error_page(page)); |
| 2713 | |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2714 | kvm_release_pfn_dirty(page_to_pfn(page)); |
Izik Eidus | 8a7ae05 | 2007-10-18 11:09:33 +0200 | [diff] [blame] | 2715 | } |
Izik Eidus | b4231d6 | 2007-11-20 11:49:33 +0200 | [diff] [blame] | 2716 | EXPORT_SYMBOL_GPL(kvm_release_page_dirty); |
Izik Eidus | 8a7ae05 | 2007-10-18 11:09:33 +0200 | [diff] [blame] | 2717 | |
David Hildenbrand | f7a6509 | 2017-09-01 17:11:43 +0200 | [diff] [blame] | 2718 | void kvm_release_pfn_dirty(kvm_pfn_t pfn) |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2719 | { |
| 2720 | kvm_set_pfn_dirty(pfn); |
| 2721 | kvm_release_pfn_clean(pfn); |
| 2722 | } |
David Hildenbrand | f7a6509 | 2017-09-01 17:11:43 +0200 | [diff] [blame] | 2723 | EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2724 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2725 | void kvm_set_pfn_dirty(kvm_pfn_t pfn) |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2726 | { |
Miaohe Lin | d29c03a | 2019-12-05 11:05:05 +0800 | [diff] [blame] | 2727 | if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) |
| 2728 | SetPageDirty(pfn_to_page(pfn)); |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2729 | } |
| 2730 | EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); |
| 2731 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2732 | void kvm_set_pfn_accessed(kvm_pfn_t pfn) |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2733 | { |
Sean Christopherson | a78986a | 2019-11-11 14:12:27 -0800 | [diff] [blame] | 2734 | if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2735 | mark_page_accessed(pfn_to_page(pfn)); |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2736 | } |
| 2737 | EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); |
| 2738 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2739 | void kvm_get_pfn(kvm_pfn_t pfn) |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2740 | { |
Ard Biesheuvel | bf4bea8 | 2014-11-10 08:33:56 +0000 | [diff] [blame] | 2741 | if (!kvm_is_reserved_pfn(pfn)) |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2742 | get_page(pfn_to_page(pfn)); |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2743 | } |
| 2744 | EXPORT_SYMBOL_GPL(kvm_get_pfn); |
| 2745 | |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2746 | static int next_segment(unsigned long len, int offset) |
| 2747 | { |
| 2748 | if (len > PAGE_SIZE - offset) |
| 2749 | return PAGE_SIZE - offset; |
| 2750 | else |
| 2751 | return len; |
| 2752 | } |
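/*
 * Worked example (editor's illustration): with PAGE_SIZE == 4096, a
 * 100-byte copy starting at page offset 4090 is split by next_segment()
 * into a 6-byte segment (4096 - 4090) that finishes the first page and
 * a 94-byte segment at offset 0 of the following page.
 */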
| 2753 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2754 | static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2755 | void *data, int offset, int len) |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2756 | { |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2757 | int r; |
| 2758 | unsigned long addr; |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2759 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2760 | addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2761 | if (kvm_is_error_hva(addr)) |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2762 | return -EFAULT; |
Paolo Bonzini | 3180a7f | 2015-04-02 14:08:20 +0200 | [diff] [blame] | 2763 | r = __copy_from_user(data, (void __user *)addr + offset, len); |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2764 | if (r) |
| 2765 | return -EFAULT; |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2766 | return 0; |
| 2767 | } |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2768 | |
| 2769 | int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, |
| 2770 | int len) |
| 2771 | { |
| 2772 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); |
| 2773 | |
| 2774 | return __kvm_read_guest_page(slot, gfn, data, offset, len); |
| 2775 | } |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2776 | EXPORT_SYMBOL_GPL(kvm_read_guest_page); |
| 2777 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2778 | int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, |
| 2779 | int offset, int len) |
| 2780 | { |
| 2781 | struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 2782 | |
| 2783 | return __kvm_read_guest_page(slot, gfn, data, offset, len); |
| 2784 | } |
| 2785 | EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); |
| 2786 | |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2787 | int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) |
| 2788 | { |
| 2789 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2790 | int seg; |
| 2791 | int offset = offset_in_page(gpa); |
| 2792 | int ret; |
| 2793 | |
| 2794 | while ((seg = next_segment(len, offset)) != 0) { |
| 2795 | ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); |
| 2796 | if (ret < 0) |
| 2797 | return ret; |
| 2798 | offset = 0; |
| 2799 | len -= seg; |
| 2800 | data += seg; |
| 2801 | ++gfn; |
| 2802 | } |
| 2803 | return 0; |
| 2804 | } |
| 2805 | EXPORT_SYMBOL_GPL(kvm_read_guest); |
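/*
 * Illustrative usage sketch (editor's addition): kvm_read_guest() hides
 * the per-page segmentation above, so a caller can copy a guest object
 * that straddles a page boundary in one call. The type and gpa below
 * are hypothetical.
 *
 *	struct guest_desc desc;
 *
 *	if (kvm_read_guest(kvm, desc_gpa, &desc, sizeof(desc)))
 *		return -EFAULT;
 */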
| 2806 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2807 | int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) |
| 2808 | { |
| 2809 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2810 | int seg; |
| 2811 | int offset = offset_in_page(gpa); |
| 2812 | int ret; |
| 2813 | |
| 2814 | while ((seg = next_segment(len, offset)) != 0) { |
| 2815 | ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); |
| 2816 | if (ret < 0) |
| 2817 | return ret; |
| 2818 | offset = 0; |
| 2819 | len -= seg; |
| 2820 | data += seg; |
| 2821 | ++gfn; |
| 2822 | } |
| 2823 | return 0; |
| 2824 | } |
| 2825 | EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); |
| 2826 | |
| 2827 | static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2828 | void *data, int offset, unsigned long len) |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2829 | { |
| 2830 | int r; |
| 2831 | unsigned long addr; |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2832 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2833 | addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2834 | if (kvm_is_error_hva(addr)) |
| 2835 | return -EFAULT; |
Andrea Arcangeli | 0aac03f | 2008-01-30 19:57:35 +0100 | [diff] [blame] | 2836 | pagefault_disable(); |
Paolo Bonzini | 3180a7f | 2015-04-02 14:08:20 +0200 | [diff] [blame] | 2837 | r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); |
Andrea Arcangeli | 0aac03f | 2008-01-30 19:57:35 +0100 | [diff] [blame] | 2838 | pagefault_enable(); |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2839 | if (r) |
| 2840 | return -EFAULT; |
| 2841 | return 0; |
| 2842 | } |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2843 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2844 | int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, |
| 2845 | void *data, unsigned long len) |
| 2846 | { |
| 2847 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2848 | struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 2849 | int offset = offset_in_page(gpa); |
| 2850 | |
| 2851 | return __kvm_read_guest_atomic(slot, gfn, data, offset, len); |
| 2852 | } |
| 2853 | EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); |
| 2854 | |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2855 | static int __kvm_write_guest_page(struct kvm *kvm, |
| 2856 | struct kvm_memory_slot *memslot, gfn_t gfn, |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2857 | const void *data, int offset, int len) |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2858 | { |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2859 | int r; |
| 2860 | unsigned long addr; |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2861 | |
Radim Krčmář | 251eb84 | 2015-04-10 21:47:27 +0200 | [diff] [blame] | 2862 | addr = gfn_to_hva_memslot(memslot, gfn); |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2863 | if (kvm_is_error_hva(addr)) |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2864 | return -EFAULT; |
Xiao Guangrong | 8b0cedf | 2011-05-15 23:22:04 +0800 | [diff] [blame] | 2865 | r = __copy_to_user((void __user *)addr + offset, data, len); |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2866 | if (r) |
| 2867 | return -EFAULT; |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2868 | mark_page_dirty_in_slot(kvm, memslot, gfn); |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2869 | return 0; |
| 2870 | } |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2871 | |
| 2872 | int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, |
| 2873 | const void *data, int offset, int len) |
| 2874 | { |
| 2875 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); |
| 2876 | |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2877 | return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2878 | } |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2879 | EXPORT_SYMBOL_GPL(kvm_write_guest_page); |
| 2880 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2881 | int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 2882 | const void *data, int offset, int len) |
| 2883 | { |
| 2884 | struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 2885 | |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2886 | return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2887 | } |
| 2888 | EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); |
| 2889 | |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2890 | int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, |
| 2891 | unsigned long len) |
| 2892 | { |
| 2893 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2894 | int seg; |
| 2895 | int offset = offset_in_page(gpa); |
| 2896 | int ret; |
| 2897 | |
| 2898 | while ((seg = next_segment(len, offset)) != 0) { |
| 2899 | ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); |
| 2900 | if (ret < 0) |
| 2901 | return ret; |
| 2902 | offset = 0; |
| 2903 | len -= seg; |
| 2904 | data += seg; |
| 2905 | ++gfn; |
| 2906 | } |
| 2907 | return 0; |
| 2908 | } |
Wincy Van | ff651cb | 2014-12-11 08:52:58 +0300 | [diff] [blame] | 2909 | EXPORT_SYMBOL_GPL(kvm_write_guest); |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2910 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2911 | int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, |
| 2912 | unsigned long len) |
| 2913 | { |
| 2914 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2915 | int seg; |
| 2916 | int offset = offset_in_page(gpa); |
| 2917 | int ret; |
| 2918 | |
| 2919 | while ((seg = next_segment(len, offset)) != 0) { |
| 2920 | ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); |
| 2921 | if (ret < 0) |
| 2922 | return ret; |
| 2923 | offset = 0; |
| 2924 | len -= seg; |
| 2925 | data += seg; |
| 2926 | ++gfn; |
| 2927 | } |
| 2928 | return 0; |
| 2929 | } |
| 2930 | EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); |
| 2931 | |
Paolo Bonzini | 5a2d436 | 2017-02-03 20:32:28 -0800 | [diff] [blame] | 2932 | static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, |
| 2933 | struct gfn_to_hva_cache *ghc, |
| 2934 | gpa_t gpa, unsigned long len) |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2935 | { |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2936 | int offset = offset_in_page(gpa); |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 2937 | gfn_t start_gfn = gpa >> PAGE_SHIFT; |
| 2938 | gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; |
| 2939 | gfn_t nr_pages_needed = end_gfn - start_gfn + 1; |
| 2940 | gfn_t nr_pages_avail; |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2941 | |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 2942 | /* Update ghc->generation before performing any error checks. */ |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2943 | ghc->generation = slots->generation; |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 2944 | |
| 2945 | if (start_gfn > end_gfn) { |
| 2946 | ghc->hva = KVM_HVA_ERR_BAD; |
| 2947 | return -EINVAL; |
| 2948 | } |
Jim Mattson | f1b9dd5 | 2018-12-17 13:53:33 -0800 | [diff] [blame] | 2949 | |
| 2950 | /* |
| 2951 | * If the requested region crosses two memslots, we still |
| 2952 | * verify that the entire region is valid here. |
| 2953 | */ |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 2954 | for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { |
Jim Mattson | f1b9dd5 | 2018-12-17 13:53:33 -0800 | [diff] [blame] | 2955 | ghc->memslot = __gfn_to_memslot(slots, start_gfn); |
| 2956 | ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, |
| 2957 | &nr_pages_avail); |
| 2958 | if (kvm_is_error_hva(ghc->hva)) |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 2959 | return -EFAULT; |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 2960 | } |
Jim Mattson | f1b9dd5 | 2018-12-17 13:53:33 -0800 | [diff] [blame] | 2961 | |
| 2962 | /* Use the slow path for cross page reads and writes. */ |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 2963 | if (nr_pages_needed == 1) |
Jim Mattson | f1b9dd5 | 2018-12-17 13:53:33 -0800 | [diff] [blame] | 2964 | ghc->hva += offset; |
| 2965 | else |
| 2966 | ghc->memslot = NULL; |
| 2967 | |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 2968 | ghc->gpa = gpa; |
| 2969 | ghc->len = len; |
| 2970 | return 0; |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2971 | } |
Paolo Bonzini | 5a2d436 | 2017-02-03 20:32:28 -0800 | [diff] [blame] | 2972 | |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2973 | int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
Paolo Bonzini | 5a2d436 | 2017-02-03 20:32:28 -0800 | [diff] [blame] | 2974 | gpa_t gpa, unsigned long len) |
| 2975 | { |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2976 | struct kvm_memslots *slots = kvm_memslots(kvm); |
Paolo Bonzini | 5a2d436 | 2017-02-03 20:32:28 -0800 | [diff] [blame] | 2977 | return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); |
| 2978 | } |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2979 | EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2980 | |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2981 | int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
Jim Mattson | 7a86dab | 2018-12-14 14:34:43 -0800 | [diff] [blame] | 2982 | void *data, unsigned int offset, |
| 2983 | unsigned long len) |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2984 | { |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 2985 | struct kvm_memslots *slots = kvm_memslots(kvm); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2986 | int r; |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 2987 | gpa_t gpa = ghc->gpa + offset; |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2988 | |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 2989 | BUG_ON(len + offset > ghc->len); |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 2990 | |
Sean Christopherson | dc9ce71 | 2020-01-09 15:56:20 -0800 | [diff] [blame] | 2991 | if (slots->generation != ghc->generation) { |
| 2992 | if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) |
| 2993 | return -EFAULT; |
| 2994 | } |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 2995 | |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2996 | if (kvm_is_error_hva(ghc->hva)) |
| 2997 | return -EFAULT; |
| 2998 | |
Sean Christopherson | fcfbc61 | 2020-01-09 15:56:18 -0800 | [diff] [blame] | 2999 | if (unlikely(!ghc->memslot)) |
| 3000 | return kvm_write_guest(kvm, gpa, data, len); |
| 3001 | |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 3002 | r = __copy_to_user((void __user *)ghc->hva + offset, data, len); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3003 | if (r) |
| 3004 | return -EFAULT; |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 3005 | mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3006 | |
| 3007 | return 0; |
| 3008 | } |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3009 | EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 3010 | |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3011 | int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| 3012 | void *data, unsigned long len) |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 3013 | { |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3014 | return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 3015 | } |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3016 | EXPORT_SYMBOL_GPL(kvm_write_guest_cached); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3017 | |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 3018 | int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| 3019 | void *data, unsigned int offset, |
| 3020 | unsigned long len) |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 3021 | { |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3022 | struct kvm_memslots *slots = kvm_memslots(kvm); |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 3023 | int r; |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 3024 | gpa_t gpa = ghc->gpa + offset; |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 3025 | |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 3026 | BUG_ON(len + offset > ghc->len); |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 3027 | |
Sean Christopherson | dc9ce71 | 2020-01-09 15:56:20 -0800 | [diff] [blame] | 3028 | if (slots->generation != ghc->generation) { |
| 3029 | if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) |
| 3030 | return -EFAULT; |
| 3031 | } |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 3032 | |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 3033 | if (kvm_is_error_hva(ghc->hva)) |
| 3034 | return -EFAULT; |
| 3035 | |
Sean Christopherson | fcfbc61 | 2020-01-09 15:56:18 -0800 | [diff] [blame] | 3036 | if (unlikely(!ghc->memslot)) |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 3037 | return kvm_read_guest(kvm, gpa, data, len); |
Sean Christopherson | fcfbc61 | 2020-01-09 15:56:18 -0800 | [diff] [blame] | 3038 | |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 3039 | r = __copy_from_user(data, (void __user *)ghc->hva + offset, len); |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 3040 | if (r) |
| 3041 | return -EFAULT; |
| 3042 | |
| 3043 | return 0; |
| 3044 | } |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 3045 | EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached); |
| 3046 | |
| 3047 | int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| 3048 | void *data, unsigned long len) |
| 3049 | { |
| 3050 | return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); |
| 3051 | } |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3052 | EXPORT_SYMBOL_GPL(kvm_read_guest_cached); |
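/*
 * Illustrative usage sketch (editor's addition): the gfn_to_hva_cache
 * helpers above suit guest areas that are accessed repeatedly, such as
 * x86's steal-time record. The cache is initialized once and then
 * reused; it is revalidated automatically when the memslot generation
 * changes. The gpa and value below are hypothetical.
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 value;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(value)))
 *		return -EFAULT;
 *	...
 *	if (kvm_write_guest_cached(kvm, &ghc, &value, sizeof(value)))
 *		return -EFAULT;
 */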
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 3053 | |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 3054 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) |
| 3055 | { |
Paolo Bonzini | 2f54144 | 2020-11-06 05:25:09 -0500 | [diff] [blame] | 3056 | const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 3057 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 3058 | int seg; |
| 3059 | int offset = offset_in_page(gpa); |
| 3060 | int ret; |
| 3061 | |
Kevin Mulvey | bfda0e8 | 2015-02-20 08:21:36 -0500 | [diff] [blame] | 3062 | while ((seg = next_segment(len, offset)) != 0) { |
Paolo Bonzini | 2f54144 | 2020-11-06 05:25:09 -0500 | [diff] [blame] | 3063 | ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 3064 | if (ret < 0) |
| 3065 | return ret; |
| 3066 | offset = 0; |
| 3067 | len -= seg; |
| 3068 | ++gfn; |
| 3069 | } |
| 3070 | return 0; |
| 3071 | } |
| 3072 | EXPORT_SYMBOL_GPL(kvm_clear_guest); |
| 3073 | |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 3074 | void mark_page_dirty_in_slot(struct kvm *kvm, |
| 3075 | struct kvm_memory_slot *memslot, |
| 3076 | gfn_t gfn) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3077 | { |
Peter Xu | 044c59c | 2020-09-30 21:22:26 -0400 | [diff] [blame] | 3078 | if (memslot && kvm_slot_dirty_track_enabled(memslot)) { |
Rusty Russell | 7e9d619 | 2007-07-31 20:41:14 +1000 | [diff] [blame] | 3079 | unsigned long rel_gfn = gfn - memslot->base_gfn; |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3080 | u32 slot = (memslot->as_id << 16) | memslot->id; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3081 | |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3082 | if (kvm->dirty_ring_size) |
| 3083 | kvm_dirty_ring_push(kvm_dirty_ring_get(kvm), |
| 3084 | slot, rel_gfn); |
| 3085 | else |
| 3086 | set_bit_le(rel_gfn, memslot->dirty_bitmap); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3087 | } |
| 3088 | } |
Ben Gardon | a6a0b05 | 2020-10-14 11:26:55 -0700 | [diff] [blame] | 3089 | EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3090 | |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3091 | void mark_page_dirty(struct kvm *kvm, gfn_t gfn) |
| 3092 | { |
| 3093 | struct kvm_memory_slot *memslot; |
| 3094 | |
| 3095 | memslot = gfn_to_memslot(kvm, gfn); |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 3096 | mark_page_dirty_in_slot(kvm, memslot, gfn); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3097 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 3098 | EXPORT_SYMBOL_GPL(mark_page_dirty); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3099 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 3100 | void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 3101 | { |
| 3102 | struct kvm_memory_slot *memslot; |
| 3103 | |
| 3104 | memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 3105 | mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 3106 | } |
| 3107 | EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); |
| 3108 | |
Jan H. Schönherr | 20b7035 | 2017-11-24 22:39:01 +0100 | [diff] [blame] | 3109 | void kvm_sigset_activate(struct kvm_vcpu *vcpu) |
| 3110 | { |
| 3111 | if (!vcpu->sigset_active) |
| 3112 | return; |
| 3113 | |
| 3114 | /* |
| 3115 | * This does a lockless modification of ->real_blocked, which is fine |
| 3116 | * because only current can change ->real_blocked and all readers of
| 3117 | * ->real_blocked don't care as long as ->real_blocked is always a
| 3118 | * subset of ->blocked.
| 3119 | */ |
| 3120 | sigprocmask(SIG_SETMASK, &vcpu->sigset, ¤t->real_blocked); |
| 3121 | } |
| 3122 | |
| 3123 | void kvm_sigset_deactivate(struct kvm_vcpu *vcpu) |
| 3124 | { |
| 3125 | if (!vcpu->sigset_active) |
| 3126 | return; |
| 3127 | |
| 3128 | sigprocmask(SIG_SETMASK, ¤t->real_blocked, NULL); |
| 3129 | sigemptyset(¤t->real_blocked); |
| 3130 | } |
| 3131 | |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3132 | static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) |
| 3133 | { |
Nir Weiner | dee339b | 2019-01-27 12:17:16 +0200 | [diff] [blame] | 3134 | unsigned int old, val, grow, grow_start; |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3135 | |
Wanpeng Li | 2cbd782 | 2015-09-03 22:07:39 +0800 | [diff] [blame] | 3136 | old = val = vcpu->halt_poll_ns; |
Nir Weiner | dee339b | 2019-01-27 12:17:16 +0200 | [diff] [blame] | 3137 | grow_start = READ_ONCE(halt_poll_ns_grow_start); |
Christian Borntraeger | 6b6de68 | 2016-02-09 13:47:55 +0100 | [diff] [blame] | 3138 | grow = READ_ONCE(halt_poll_ns_grow); |
Nir Weiner | 7fa08e7 | 2019-01-27 12:17:14 +0200 | [diff] [blame] | 3139 | if (!grow) |
| 3140 | goto out; |
| 3141 | |
Nir Weiner | dee339b | 2019-01-27 12:17:16 +0200 | [diff] [blame] | 3142 | val *= grow; |
| 3143 | if (val < grow_start) |
| 3144 | val = grow_start; |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3145 | |
David Matlack | 258785e | 2021-05-06 15:24:43 +0000 | [diff] [blame] | 3146 | if (val > vcpu->kvm->max_halt_poll_ns) |
| 3147 | val = vcpu->kvm->max_halt_poll_ns; |
David Matlack | 313f636 | 2016-03-08 16:19:44 -0800 | [diff] [blame] | 3148 | |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3149 | vcpu->halt_poll_ns = val; |
Nir Weiner | 7fa08e7 | 2019-01-27 12:17:14 +0200 | [diff] [blame] | 3150 | out: |
Wanpeng Li | 2cbd782 | 2015-09-03 22:07:39 +0800 | [diff] [blame] | 3151 | trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3152 | } |
| 3153 | |
| 3154 | static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) |
| 3155 | { |
Christian Borntraeger | 6b6de68 | 2016-02-09 13:47:55 +0100 | [diff] [blame] | 3156 | unsigned int old, val, shrink; |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3157 | |
Wanpeng Li | 2cbd782 | 2015-09-03 22:07:39 +0800 | [diff] [blame] | 3158 | old = val = vcpu->halt_poll_ns; |
Christian Borntraeger | 6b6de68 | 2016-02-09 13:47:55 +0100 | [diff] [blame] | 3159 | shrink = READ_ONCE(halt_poll_ns_shrink); |
| 3160 | if (shrink == 0) |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3161 | val = 0; |
| 3162 | else |
Christian Borntraeger | 6b6de68 | 2016-02-09 13:47:55 +0100 | [diff] [blame] | 3163 | val /= shrink; |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3164 | |
| 3165 | vcpu->halt_poll_ns = val; |
Wanpeng Li | 2cbd782 | 2015-09-03 22:07:39 +0800 | [diff] [blame] | 3166 | trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3167 | } |
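/*
 * Worked example (editor's illustration; the module parameter defaults
 * are assumptions): with halt_poll_ns_grow == 2 and
 * halt_poll_ns_grow_start == 10000, a vCPU polling window of 0 ns grows
 * to 10000 ns (the grow_start floor), then 20000 ns, 40000 ns, and so
 * on, capped at kvm->max_halt_poll_ns. With halt_poll_ns_shrink == 0,
 * shrinking collapses the window straight back to 0 rather than halving.
 */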
| 3168 | |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3169 | static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) |
| 3170 | { |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 3171 | int ret = -EINTR; |
| 3172 | int idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 3173 | |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3174 | if (kvm_arch_vcpu_runnable(vcpu)) { |
| 3175 | kvm_make_request(KVM_REQ_UNHALT, vcpu); |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 3176 | goto out; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3177 | } |
| 3178 | if (kvm_cpu_has_pending_timer(vcpu)) |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 3179 | goto out; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3180 | if (signal_pending(current)) |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 3181 | goto out; |
Marcelo Tosatti | 084071d | 2021-05-25 10:41:17 -0300 | [diff] [blame] | 3182 | if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) |
| 3183 | goto out; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3184 | |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 3185 | ret = 0; |
| 3186 | out: |
| 3187 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 3188 | return ret; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3189 | } |
| 3190 | |
David Matlack | cb95312 | 2020-05-08 11:22:40 -0700 | [diff] [blame] | 3191 | static inline void |
| 3192 | update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited) |
| 3193 | { |
| 3194 | if (waited) |
Jing Zhang | 0193cc9 | 2021-06-18 22:27:03 +0000 | [diff] [blame] | 3195 | vcpu->stat.generic.halt_poll_fail_ns += poll_ns; |
David Matlack | cb95312 | 2020-05-08 11:22:40 -0700 | [diff] [blame] | 3196 | else |
Jing Zhang | 0193cc9 | 2021-06-18 22:27:03 +0000 | [diff] [blame] | 3197 | vcpu->stat.generic.halt_poll_success_ns += poll_ns; |
David Matlack | cb95312 | 2020-05-08 11:22:40 -0700 | [diff] [blame] | 3198 | } |
| 3199 | |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3200 | /* |
| 3201 | * The vCPU has executed a HLT instruction with in-kernel mode enabled. |
| 3202 | */ |
Hollis Blanchard | 8776e51 | 2007-10-31 17:24:24 -0500 | [diff] [blame] | 3203 | void kvm_vcpu_block(struct kvm_vcpu *vcpu) |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3204 | { |
David Matlack | cb95312 | 2020-05-08 11:22:40 -0700 | [diff] [blame] | 3205 | ktime_t start, cur, poll_end; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3206 | bool waited = false; |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3207 | u64 block_ns; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3208 | |
Marc Zyngier | 07ab0f8 | 2019-08-02 11:37:09 +0100 | [diff] [blame] | 3209 | kvm_arch_vcpu_blocking(vcpu); |
| 3210 | |
David Matlack | cb95312 | 2020-05-08 11:22:40 -0700 | [diff] [blame] | 3211 | start = cur = poll_end = ktime_get(); |
Christian Borntraeger | cdd6ad3 | 2019-03-05 05:30:01 -0500 | [diff] [blame] | 3212 | if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) { |
Wanpeng Li | 19020f8 | 2015-09-03 22:07:37 +0800 | [diff] [blame] | 3213 | ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); |
Xiubo Li | f95ef0cd | 2015-02-26 14:58:23 +0800 | [diff] [blame] | 3214 | |
Jing Zhang | 0193cc9 | 2021-06-18 22:27:03 +0000 | [diff] [blame] | 3215 | ++vcpu->stat.generic.halt_attempted_poll; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3216 | do { |
| 3217 | /* |
| 3218 | * This sets KVM_REQ_UNHALT if an interrupt |
| 3219 | * arrives. |
| 3220 | */ |
| 3221 | if (kvm_vcpu_check_block(vcpu) < 0) { |
Jing Zhang | 0193cc9 | 2021-06-18 22:27:03 +0000 | [diff] [blame] | 3222 | ++vcpu->stat.generic.halt_successful_poll; |
Christian Borntraeger | 3491caf | 2016-05-13 12:16:35 +0200 | [diff] [blame] | 3223 | if (!vcpu_valid_wakeup(vcpu)) |
Jing Zhang | 0193cc9 | 2021-06-18 22:27:03 +0000 | [diff] [blame] | 3224 | ++vcpu->stat.generic.halt_poll_invalid; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3225 | goto out; |
| 3226 | } |
Li RongQing | 7477565 | 2021-07-27 19:12:47 +0800 | [diff] [blame] | 3227 | cpu_relax(); |
David Matlack | cb95312 | 2020-05-08 11:22:40 -0700 | [diff] [blame] | 3228 | poll_end = cur = ktime_get(); |
Wanpeng Li | 6bd5b74 | 2021-05-18 05:00:31 -0700 | [diff] [blame] | 3229 | } while (kvm_vcpu_can_poll(cur, stop)); |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3230 | } |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3231 | |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 3232 | prepare_to_rcuwait(&vcpu->wait); |
Marcelo Tosatti | e5c239c | 2008-05-08 19:47:01 -0300 | [diff] [blame] | 3233 | for (;;) { |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 3234 | set_current_state(TASK_INTERRUPTIBLE); |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3235 | |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3236 | if (kvm_vcpu_check_block(vcpu) < 0) |
Marcelo Tosatti | e5c239c | 2008-05-08 19:47:01 -0300 | [diff] [blame] | 3237 | break; |
| 3238 | |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3239 | waited = true; |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3240 | schedule(); |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3241 | } |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 3242 | finish_rcuwait(&vcpu->wait); |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3243 | cur = ktime_get(); |
Jing Zhang | 87bcc5f | 2021-08-02 16:56:32 +0000 | [diff] [blame^] | 3244 | if (waited) { |
| 3245 | vcpu->stat.generic.halt_wait_ns += |
| 3246 | ktime_to_ns(cur) - ktime_to_ns(poll_end); |
| 3247 | } |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3248 | out: |
Marc Zyngier | 07ab0f8 | 2019-08-02 11:37:09 +0100 | [diff] [blame] | 3249 | kvm_arch_vcpu_unblocking(vcpu); |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3250 | block_ns = ktime_to_ns(cur) - ktime_to_ns(start); |
| 3251 | |
David Matlack | cb95312 | 2020-05-08 11:22:40 -0700 | [diff] [blame] | 3252 | update_halt_poll_stats( |
| 3253 | vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited); |
| 3254 | |
Wanpeng Li | 44551b2 | 2019-09-29 09:06:56 +0800 | [diff] [blame] | 3255 | if (!kvm_arch_no_poll(vcpu)) { |
| 3256 | if (!vcpu_valid_wakeup(vcpu)) { |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3257 | shrink_halt_poll_ns(vcpu); |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 3258 | } else if (vcpu->kvm->max_halt_poll_ns) { |
Wanpeng Li | 44551b2 | 2019-09-29 09:06:56 +0800 | [diff] [blame] | 3259 | if (block_ns <= vcpu->halt_poll_ns) |
| 3260 | ; |
| 3261 | /* we had a long block, shrink polling */ |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 3262 | else if (vcpu->halt_poll_ns && |
| 3263 | block_ns > vcpu->kvm->max_halt_poll_ns) |
Wanpeng Li | 44551b2 | 2019-09-29 09:06:56 +0800 | [diff] [blame] | 3264 | shrink_halt_poll_ns(vcpu); |
| 3265 | /* we had a short halt and our poll time is too small */ |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 3266 | else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns && |
| 3267 | block_ns < vcpu->kvm->max_halt_poll_ns) |
Wanpeng Li | 44551b2 | 2019-09-29 09:06:56 +0800 | [diff] [blame] | 3268 | grow_halt_poll_ns(vcpu); |
| 3269 | } else { |
| 3270 | vcpu->halt_poll_ns = 0; |
| 3271 | } |
| 3272 | } |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3273 | |
Christian Borntraeger | 3491caf | 2016-05-13 12:16:35 +0200 | [diff] [blame] | 3274 | trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu)); |
| 3275 | kvm_arch_vcpu_block_finish(vcpu); |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3276 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 3277 | EXPORT_SYMBOL_GPL(kvm_vcpu_block); |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3278 | |
Radim Krčmář | 178f02f | 2017-04-26 22:32:26 +0200 | [diff] [blame] | 3279 | bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 3280 | { |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 3281 | struct rcuwait *waitp; |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 3282 | |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 3283 | waitp = kvm_arch_vcpu_get_wait(vcpu); |
| 3284 | if (rcuwait_wake_up(waitp)) { |
Wanpeng Li | d73eb57 | 2019-07-18 19:39:06 +0800 | [diff] [blame] | 3285 | WRITE_ONCE(vcpu->ready, true); |
Jing Zhang | 0193cc9 | 2021-06-18 22:27:03 +0000 | [diff] [blame] | 3286 | ++vcpu->stat.generic.halt_wakeup; |
Radim Krčmář | 178f02f | 2017-04-26 22:32:26 +0200 | [diff] [blame] | 3287 | return true; |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 3288 | } |
| 3289 | |
Radim Krčmář | 178f02f | 2017-04-26 22:32:26 +0200 | [diff] [blame] | 3290 | return false; |
Radim Krčmář | dd1a4cc | 2016-05-04 14:09:44 -0500 | [diff] [blame] | 3291 | } |
| 3292 | EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); |
| 3293 | |
Paolo Bonzini | 0266c89 | 2017-05-04 15:14:13 +0200 | [diff] [blame] | 3294 | #ifndef CONFIG_S390 |
Radim Krčmář | dd1a4cc | 2016-05-04 14:09:44 -0500 | [diff] [blame] | 3295 | /* |
| 3296 | * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. |
| 3297 | */ |
| 3298 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu) |
| 3299 | { |
| 3300 | int me; |
| 3301 | int cpu = vcpu->cpu; |
| 3302 | |
Radim Krčmář | 178f02f | 2017-04-26 22:32:26 +0200 | [diff] [blame] | 3303 | if (kvm_vcpu_wake_up(vcpu)) |
| 3304 | return; |
| 3305 | |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 3306 | me = get_cpu(); |
| 3307 | if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) |
| 3308 | if (kvm_arch_vcpu_should_kick(vcpu)) |
| 3309 | smp_send_reschedule(cpu); |
| 3310 | put_cpu(); |
| 3311 | } |
Yang Zhang | a20ed54 | 2013-04-11 19:25:15 +0800 | [diff] [blame] | 3312 | EXPORT_SYMBOL_GPL(kvm_vcpu_kick); |
Paolo Bonzini | 0266c89 | 2017-05-04 15:14:13 +0200 | [diff] [blame] | 3313 | #endif /* !CONFIG_S390 */ |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 3314 | |
Dan Carpenter | fa93384 | 2014-05-23 13:20:42 +0300 | [diff] [blame] | 3315 | int kvm_vcpu_yield_to(struct kvm_vcpu *target) |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 3316 | { |
| 3317 | struct pid *pid; |
| 3318 | struct task_struct *task = NULL; |
Dan Carpenter | fa93384 | 2014-05-23 13:20:42 +0300 | [diff] [blame] | 3319 | int ret = 0; |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 3320 | |
| 3321 | rcu_read_lock(); |
| 3322 | pid = rcu_dereference(target->pid); |
| 3323 | if (pid) |
Sam Bobroff | 27fbe64b | 2014-09-19 09:40:41 +1000 | [diff] [blame] | 3324 | task = get_pid_task(pid, PIDTYPE_PID); |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 3325 | rcu_read_unlock(); |
| 3326 | if (!task) |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3327 | return ret; |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3328 | ret = yield_to(task, 1); |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 3329 | put_task_struct(task); |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3330 | |
| 3331 | return ret; |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 3332 | } |
| 3333 | EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); |
| 3334 | |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3335 | /* |
| 3336 | * Helper that checks whether a VCPU is eligible for directed yield. |
| 3337 | * The most eligible candidate to yield to is decided by the following heuristics:
| 3338 | *
| 3339 | * (a) A VCPU which has not done a PLE exit or cpu-relax intercept recently
| 3340 | * (a preempted lock holder), indicated by @in_spin_loop.
Fuad Tabba | 656012c | 2020-04-01 15:03:10 +0100 | [diff] [blame] | 3341 | * Set at the beginning and cleared at the end of the interception/PLE handler.
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3342 | *
| 3343 | * (b) A VCPU which has done a PLE exit/cpu-relax intercept but did not get a
| 3344 | * chance last time (it has most likely become eligible now, since we probably
| 3345 | * yielded to the lock holder in the last iteration; this is done by toggling
| 3346 | * @dy_eligible each time a VCPU is checked for eligibility).
| 3347 | *
| 3348 | * Yielding to a recently PLE-exited/cpu-relax-intercepted VCPU before yielding
| 3349 | * to a preempted lock holder could result in wrong VCPU selection and CPU
| 3350 | * burning. Giving priority to a potential lock holder increases lock
| 3351 | * progress.
| 3352 | *
| 3353 | * Since the algorithm is based on heuristics, accessing another VCPU's data
| 3354 | * without locking does no harm. It may result in trying to yield to the same
| 3355 | * VCPU, failing, and continuing with the next VCPU, and so on.
| 3356 | */ |
Stephen Hemminger | 7940876 | 2013-12-29 12:12:29 -0800 | [diff] [blame] | 3357 | static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3358 | { |
Scott Wood | 4a55dd7 | 2014-01-09 18:43:16 -0600 | [diff] [blame] | 3359 | #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3360 | bool eligible; |
| 3361 | |
| 3362 | eligible = !vcpu->spin_loop.in_spin_loop || |
Christian Borntraeger | 3465611 | 2014-09-04 21:13:31 +0200 | [diff] [blame] | 3363 | vcpu->spin_loop.dy_eligible; |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3364 | |
| 3365 | if (vcpu->spin_loop.in_spin_loop) |
| 3366 | kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); |
| 3367 | |
| 3368 | return eligible; |
Scott Wood | 4a55dd7 | 2014-01-09 18:43:16 -0600 | [diff] [blame] | 3369 | #else |
| 3370 | return true; |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3371 | #endif |
Scott Wood | 4a55dd7 | 2014-01-09 18:43:16 -0600 | [diff] [blame] | 3372 | } |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3373 | |
Wanpeng Li | 17e433b | 2019-08-05 10:03:19 +0800 | [diff] [blame] | 3374 | /* |
| 3375 | * Unlike kvm_arch_vcpu_runnable, this function is called outside |
| 3376 | * a vcpu_load/vcpu_put pair. However, for most architectures |
| 3377 | * kvm_arch_vcpu_runnable does not require vcpu_load. |
| 3378 | */ |
| 3379 | bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) |
| 3380 | { |
| 3381 | return kvm_arch_vcpu_runnable(vcpu); |
| 3382 | } |
| 3383 | |
| 3384 | static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) |
| 3385 | { |
| 3386 | if (kvm_arch_dy_runnable(vcpu)) |
| 3387 | return true; |
| 3388 | |
| 3389 | #ifdef CONFIG_KVM_ASYNC_PF |
| 3390 | if (!list_empty_careful(&vcpu->async_pf.done)) |
| 3391 | return true; |
| 3392 | #endif |
| 3393 | |
| 3394 | return false; |
| 3395 | } |
| 3396 | |
Wanpeng Li | 52acd22 | 2021-04-16 11:08:10 +0800 | [diff] [blame] | 3397 | bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) |
| 3398 | { |
| 3399 | return false; |
| 3400 | } |
| 3401 | |
Longpeng(Mike) | 199b576 | 2017-08-08 12:05:32 +0800 | [diff] [blame] | 3402 | void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) |
Zhai, Edwin | d255f4f | 2009-10-09 18:03:20 +0800 | [diff] [blame] | 3403 | { |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3404 | struct kvm *kvm = me->kvm; |
| 3405 | struct kvm_vcpu *vcpu; |
| 3406 | int last_boosted_vcpu = me->kvm->last_boosted_vcpu; |
| 3407 | int yielded = 0; |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3408 | int try = 3; |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3409 | int pass; |
| 3410 | int i; |
Zhai, Edwin | d255f4f | 2009-10-09 18:03:20 +0800 | [diff] [blame] | 3411 | |
Raghavendra K T | 4c08849 | 2012-07-18 19:07:46 +0530 | [diff] [blame] | 3412 | kvm_vcpu_set_in_spin_loop(me, true); |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3413 | /* |
| 3414 | * We boost the priority of a VCPU that is runnable but not |
| 3415 | * currently running, because it got preempted by something |
| 3416 | * else and called schedule in __vcpu_run. Hopefully that |
| 3417 | * VCPU is holding the lock that we need and will release it. |
| 3418 | * We approximate round-robin by starting at the last boosted VCPU. |
| 3419 | */ |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3420 | for (pass = 0; pass < 2 && !yielded && try; pass++) { |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3421 | kvm_for_each_vcpu(i, vcpu, kvm) { |
Rik van Riel | 5cfc2aa | 2012-06-19 16:51:04 -0400 | [diff] [blame] | 3422 | if (!pass && i <= last_boosted_vcpu) { |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3423 | i = last_boosted_vcpu; |
| 3424 | continue; |
| 3425 | } else if (pass && i > last_boosted_vcpu) |
| 3426 | break; |
Wanpeng Li | d73eb57 | 2019-07-18 19:39:06 +0800 | [diff] [blame] | 3427 | if (!READ_ONCE(vcpu->ready)) |
Raghavendra K T | 7bc7ae2 | 2013-03-04 23:32:27 +0530 | [diff] [blame] | 3428 | continue; |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3429 | if (vcpu == me) |
| 3430 | continue; |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 3431 | if (rcuwait_active(&vcpu->wait) && |
| 3432 | !vcpu_dy_runnable(vcpu)) |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3433 | continue; |
Wanpeng Li | 046ddee | 2019-08-01 11:30:14 +0800 | [diff] [blame] | 3434 | if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && |
Wanpeng Li | 52acd22 | 2021-04-16 11:08:10 +0800 | [diff] [blame] | 3435 | !kvm_arch_dy_has_pending_interrupt(vcpu) && |
| 3436 | !kvm_arch_vcpu_in_kernel(vcpu)) |
Longpeng(Mike) | 199b576 | 2017-08-08 12:05:32 +0800 | [diff] [blame] | 3437 | continue; |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3438 | if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) |
| 3439 | continue; |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3440 | |
| 3441 | yielded = kvm_vcpu_yield_to(vcpu); |
| 3442 | if (yielded > 0) { |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3443 | kvm->last_boosted_vcpu = i; |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3444 | break; |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3445 | } else if (yielded < 0) { |
| 3446 | try--; |
| 3447 | if (!try) |
| 3448 | break; |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3449 | } |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3450 | } |
| 3451 | } |
Raghavendra K T | 4c08849 | 2012-07-18 19:07:46 +0530 | [diff] [blame] | 3452 | kvm_vcpu_set_in_spin_loop(me, false); |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3453 | |
| 3454 | /* Ensure vcpu is not eligible during next spinloop */ |
| 3455 | kvm_vcpu_set_dy_eligible(me, false); |
Zhai, Edwin | d255f4f | 2009-10-09 18:03:20 +0800 | [diff] [blame] | 3456 | } |
| 3457 | EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); |
| 3458 | |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3459 | static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) |
| 3460 | { |
| 3461 | #if KVM_DIRTY_LOG_PAGE_OFFSET > 0 |
| 3462 | return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && |
| 3463 | (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + |
| 3464 | kvm->dirty_ring_size / PAGE_SIZE); |
| 3465 | #else |
| 3466 | return false; |
| 3467 | #endif |
| 3468 | } |
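/*
 * Worked example (editor's illustration, assuming x86's
 * KVM_DIRTY_LOG_PAGE_OFFSET of 64 and a 4 KiB PAGE_SIZE): a ring of
 * dirty_ring_size == 65536 bytes spans 16 pages, so vcpu-fd page
 * offsets 64..79 map the dirty ring while pgoff 0 still maps kvm_run.
 */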
| 3469 | |
Souptick Joarder | 1499fa8 | 2018-04-19 00:49:58 +0530 | [diff] [blame] | 3470 | static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3471 | { |
Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 3472 | struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3473 | struct page *page; |
| 3474 | |
npiggin@suse.de | e4a533a | 2007-12-05 18:15:52 +1100 | [diff] [blame] | 3475 | if (vmf->pgoff == 0) |
Avi Kivity | 039576c | 2007-03-20 12:46:50 +0200 | [diff] [blame] | 3476 | page = virt_to_page(vcpu->run); |
Avi Kivity | 0956676 | 2008-01-23 18:14:23 +0200 | [diff] [blame] | 3477 | #ifdef CONFIG_X86 |
npiggin@suse.de | e4a533a | 2007-12-05 18:15:52 +1100 | [diff] [blame] | 3478 | else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 3479 | page = virt_to_page(vcpu->arch.pio_data); |
Avi Kivity | 0956676 | 2008-01-23 18:14:23 +0200 | [diff] [blame] | 3480 | #endif |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 3481 | #ifdef CONFIG_KVM_MMIO |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 3482 | else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) |
| 3483 | page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); |
| 3484 | #endif |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3485 | else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) |
| 3486 | page = kvm_dirty_ring_get_page( |
| 3487 | &vcpu->dirty_ring, |
| 3488 | vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET); |
Avi Kivity | 039576c | 2007-03-20 12:46:50 +0200 | [diff] [blame] | 3489 | else |
Carsten Otte | 5b1c149 | 2012-01-04 10:25:23 +0100 | [diff] [blame] | 3490 | return kvm_arch_vcpu_fault(vcpu, vmf); |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3491 | get_page(page); |
npiggin@suse.de | e4a533a | 2007-12-05 18:15:52 +1100 | [diff] [blame] | 3492 | vmf->page = page; |
| 3493 | return 0; |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3494 | } |
| 3495 | |
Alexey Dobriyan | f0f37e2f | 2009-09-27 22:29:37 +0400 | [diff] [blame] | 3496 | static const struct vm_operations_struct kvm_vcpu_vm_ops = { |
npiggin@suse.de | e4a533a | 2007-12-05 18:15:52 +1100 | [diff] [blame] | 3497 | .fault = kvm_vcpu_fault, |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3498 | }; |
| 3499 | |
| 3500 | static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) |
| 3501 | { |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3502 | struct kvm_vcpu *vcpu = file->private_data; |
| 3503 | unsigned long pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
| 3504 | |
| 3505 | if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || |
| 3506 | kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && |
| 3507 | ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED))) |
| 3508 | return -EINVAL; |
| 3509 | |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3510 | vma->vm_ops = &kvm_vcpu_vm_ops; |
| 3511 | return 0; |
| 3512 | } |
| 3513 | |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3514 | static int kvm_vcpu_release(struct inode *inode, struct file *filp) |
| 3515 | { |
| 3516 | struct kvm_vcpu *vcpu = filp->private_data; |
| 3517 | |
Al Viro | 66c0b39 | 2008-04-19 20:33:56 +0100 | [diff] [blame] | 3518 | kvm_put_kvm(vcpu->kvm); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3519 | return 0; |
| 3520 | } |
| 3521 | |
Christian Borntraeger | 3d3aab1 | 2008-12-02 11:17:32 +0100 | [diff] [blame] | 3522 | static struct file_operations kvm_vcpu_fops = { |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3523 | .release = kvm_vcpu_release, |
| 3524 | .unlocked_ioctl = kvm_vcpu_ioctl, |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3525 | .mmap = kvm_vcpu_mmap, |
Arnd Bergmann | 6038f37 | 2010-08-15 18:52:59 +0200 | [diff] [blame] | 3526 | .llseek = noop_llseek, |
Marc Zyngier | 7ddfd3e | 2018-06-17 10:16:21 +0100 | [diff] [blame] | 3527 | KVM_COMPAT(kvm_vcpu_compat_ioctl), |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3528 | }; |
| 3529 | |
| 3530 | /* |
| 3531 | * Allocates an inode for the vcpu. |
| 3532 | */ |
| 3533 | static int create_vcpu_fd(struct kvm_vcpu *vcpu) |
| 3534 | { |
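	/* 8 bytes for "kvm-vcpu", 1 for ':', ITOA_MAX_LEN for vcpu_id, 1 for '\0' */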
Masatake YAMATO | e46b469 | 2018-01-20 04:04:22 +0900 | [diff] [blame] | 3535 | char name[8 + 1 + ITOA_MAX_LEN + 1]; |
| 3536 | |
| 3537 | snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); |
| 3538 | return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3539 | } |
| 3540 | |
Greg KH | 3e7093d | 2019-07-31 20:56:20 +0200 | [diff] [blame] | 3541 | static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3542 | { |
Paolo Bonzini | 741cbba | 2019-08-03 08:14:25 +0200 | [diff] [blame] | 3543 | #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS |
Paolo Bonzini | d56f513 | 2020-06-04 15:16:52 +0200 | [diff] [blame] | 3544 | struct dentry *debugfs_dentry; |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3545 | char dir_name[ITOA_MAX_LEN * 2]; |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3546 | |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3547 | if (!debugfs_initialized()) |
Greg KH | 3e7093d | 2019-07-31 20:56:20 +0200 | [diff] [blame] | 3548 | return; |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3549 | |
| 3550 | snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); |
Paolo Bonzini | d56f513 | 2020-06-04 15:16:52 +0200 | [diff] [blame] | 3551 | debugfs_dentry = debugfs_create_dir(dir_name, |
| 3552 | vcpu->kvm->debugfs_dentry); |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3553 | |
Paolo Bonzini | d56f513 | 2020-06-04 15:16:52 +0200 | [diff] [blame] | 3554 | kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); |
Paolo Bonzini | 741cbba | 2019-08-03 08:14:25 +0200 | [diff] [blame] | 3555 | #endif |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3556 | } |
| 3557 | |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3558 | /* |
| 3559 | * Creates some virtual cpus. Good luck creating more than one. |
| 3560 | */ |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3561 | static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3562 | { |
| 3563 | int r; |
David Hildenbrand | e09fefd | 2015-11-05 09:03:50 +0100 | [diff] [blame] | 3564 | struct kvm_vcpu *vcpu; |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3565 | struct page *page; |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3566 | |
Greg Kurz | 0b1b1df | 2016-05-09 18:13:37 +0200 | [diff] [blame] | 3567 | if (id >= KVM_MAX_VCPU_ID) |
Andy Honig | 338c7db | 2013-11-18 16:09:22 -0800 | [diff] [blame] | 3568 | return -EINVAL; |
| 3569 | |
Paolo Bonzini | 6c7caeb | 2016-06-13 14:48:25 +0200 | [diff] [blame] | 3570 | mutex_lock(&kvm->lock); |
| 3571 | if (kvm->created_vcpus == KVM_MAX_VCPUS) { |
| 3572 | mutex_unlock(&kvm->lock); |
| 3573 | return -EINVAL; |
| 3574 | } |
| 3575 | |
| 3576 | kvm->created_vcpus++; |
| 3577 | mutex_unlock(&kvm->lock); |
| 3578 | |
Sean Christopherson | 897cc38 | 2019-12-18 13:55:09 -0800 | [diff] [blame] | 3579 | r = kvm_arch_vcpu_precreate(kvm, id); |
| 3580 | if (r) |
| 3581 | goto vcpu_decrement; |
| 3582 | |
Sean Christopherson | 85f4793 | 2021-04-06 12:07:40 -0700 | [diff] [blame] | 3583 | vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3584 | if (!vcpu) { |
| 3585 | r = -ENOMEM; |
Paolo Bonzini | 6c7caeb | 2016-06-13 14:48:25 +0200 | [diff] [blame] | 3586 | goto vcpu_decrement; |
| 3587 | } |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3588 | |
Peter Xu | fcd97ad | 2020-01-09 09:57:12 -0500 | [diff] [blame] | 3589 | BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE); |
Shakeel Butt | 93bb59c | 2020-12-18 14:01:38 -0800 | [diff] [blame] | 3590 | page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3591 | if (!page) { |
| 3592 | r = -ENOMEM; |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3593 | goto vcpu_free; |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3594 | } |
| 3595 | vcpu->run = page_address(page); |
| 3596 | |
| 3597 | kvm_vcpu_init(vcpu, kvm, id); |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3598 | |
| 3599 | r = kvm_arch_vcpu_create(vcpu); |
| 3600 | if (r) |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3601 | goto vcpu_free_run_page; |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3602 | |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3603 | if (kvm->dirty_ring_size) { |
| 3604 | r = kvm_dirty_ring_alloc(&vcpu->dirty_ring, |
| 3605 | id, kvm->dirty_ring_size); |
| 3606 | if (r) |
| 3607 | goto arch_vcpu_destroy; |
| 3608 | } |
| 3609 | |
Shaohua Li | 11ec280 | 2007-07-23 14:51:37 +0800 | [diff] [blame] | 3610 | mutex_lock(&kvm->lock); |
David Hildenbrand | e09fefd | 2015-11-05 09:03:50 +0100 | [diff] [blame] | 3611 | if (kvm_get_vcpu_by_id(kvm, id)) { |
| 3612 | r = -EEXIST; |
| 3613 | goto unlock_vcpu_destroy; |
| 3614 | } |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3615 | |
Radim Krčmář | 8750e72 | 2019-11-07 07:53:42 -0500 | [diff] [blame] | 3616 | vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); |
| 3617 | BUG_ON(kvm->vcpus[vcpu->vcpu_idx]); |
Rusty Russell | fb3f0f5 | 2007-07-27 17:16:56 +1000 | [diff] [blame] | 3618 | |
Jing Zhang | ce55c04 | 2021-06-18 22:27:06 +0000 | [diff] [blame] | 3619 | /* Fill the stats id string for the vcpu */ |
| 3620 | snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d", |
| 3621 | task_pid_nr(current), id); |
| 3622 | |
Rusty Russell | fb3f0f5 | 2007-07-27 17:16:56 +1000 | [diff] [blame] | 3623 | /* Now it's all set up, let userspace reach it */ |
Al Viro | 66c0b39 | 2008-04-19 20:33:56 +0100 | [diff] [blame] | 3624 | kvm_get_kvm(kvm); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3625 | r = create_vcpu_fd(vcpu); |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3626 | if (r < 0) { |
Sean Christopherson | 149487b | 2019-10-21 15:58:42 -0700 | [diff] [blame] | 3627 | kvm_put_kvm_no_destroy(kvm); |
Jan Kiszka | d780592 | 2011-05-23 10:33:05 +0200 | [diff] [blame] | 3628 | goto unlock_vcpu_destroy; |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3629 | } |
| 3630 | |
Radim Krčmář | 8750e72 | 2019-11-07 07:53:42 -0500 | [diff] [blame] | 3631 | kvm->vcpus[vcpu->vcpu_idx] = vcpu; |
Paolo Bonzini | dd48924 | 2015-07-29 11:32:20 +0200 | [diff] [blame] | 3632 | |
| 3633 | /* |
| 3634 |  * Pairs with smp_rmb() in kvm_get_vcpu().  Write the kvm->vcpus |
| 3635 |  * entry before kvm->online_vcpus is incremented. |
| 3636 | */ |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3637 | smp_wmb(); |
| 3638 | atomic_inc(&kvm->online_vcpus); |
| 3639 | |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3640 | mutex_unlock(&kvm->lock); |
Marcelo Tosatti | 42897d8 | 2012-11-27 23:29:02 -0200 | [diff] [blame] | 3641 | kvm_arch_vcpu_postcreate(vcpu); |
Paolo Bonzini | 63d0434 | 2020-04-01 00:42:22 +0200 | [diff] [blame] | 3642 | kvm_create_vcpu_debugfs(vcpu); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3643 | return r; |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3644 | |
Jan Kiszka | d780592 | 2011-05-23 10:33:05 +0200 | [diff] [blame] | 3645 | unlock_vcpu_destroy: |
Glauber Costa | 7d8fece | 2008-09-17 23:16:59 -0300 | [diff] [blame] | 3646 | mutex_unlock(&kvm->lock); |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3647 | kvm_dirty_ring_free(&vcpu->dirty_ring); |
| 3648 | arch_vcpu_destroy: |
Hollis Blanchard | d40ccc6 | 2007-11-19 14:04:43 -0600 | [diff] [blame] | 3649 | kvm_arch_vcpu_destroy(vcpu); |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3650 | vcpu_free_run_page: |
| 3651 | free_page((unsigned long)vcpu->run); |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3652 | vcpu_free: |
| 3653 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
Paolo Bonzini | 6c7caeb | 2016-06-13 14:48:25 +0200 | [diff] [blame] | 3654 | vcpu_decrement: |
| 3655 | mutex_lock(&kvm->lock); |
| 3656 | kvm->created_vcpus--; |
| 3657 | mutex_unlock(&kvm->lock); |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3658 | return r; |
| 3659 | } |
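
/*
 * Usage sketch (illustration only, not kernel code): the userspace sequence
 * that reaches this path, assuming kvm_fd came from open("/dev/kvm") and
 * vm_fd from KVM_CREATE_VM:
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);      (vcpu id 0)
 *	long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 */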
| 3660 | |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 3661 | static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) |
| 3662 | { |
| 3663 | if (sigset) { |
| 3664 | sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
| 3665 | vcpu->sigset_active = 1; |
| 3666 | vcpu->sigset = *sigset; |
| 3667 | } else |
| 3668 | vcpu->sigset_active = 0; |
| 3669 | return 0; |
| 3670 | } |
| 3671 | |
Jing Zhang | ce55c04 | 2021-06-18 22:27:06 +0000 | [diff] [blame] | 3672 | static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer, |
| 3673 | size_t size, loff_t *offset) |
| 3674 | { |
| 3675 | struct kvm_vcpu *vcpu = file->private_data; |
| 3676 | |
| 3677 | return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header, |
| 3678 | &kvm_vcpu_stats_desc[0], &vcpu->stat, |
| 3679 | sizeof(vcpu->stat), user_buffer, size, offset); |
| 3680 | } |
| 3681 | |
| 3682 | static const struct file_operations kvm_vcpu_stats_fops = { |
| 3683 | .read = kvm_vcpu_stats_read, |
| 3684 | .llseek = noop_llseek, |
| 3685 | }; |
| 3686 | |
| 3687 | static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu) |
| 3688 | { |
| 3689 | int fd; |
| 3690 | struct file *file; |
| 3691 | char name[15 + ITOA_MAX_LEN + 1]; |
| 3692 | |
| 3693 | snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id); |
| 3694 | |
| 3695 | fd = get_unused_fd_flags(O_CLOEXEC); |
| 3696 | if (fd < 0) |
| 3697 | return fd; |
| 3698 | |
| 3699 | file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY); |
| 3700 | if (IS_ERR(file)) { |
| 3701 | put_unused_fd(fd); |
| 3702 | return PTR_ERR(file); |
| 3703 | } |
| 3704 | file->f_mode |= FMODE_PREAD; |
| 3705 | fd_install(fd, file); |
| 3706 | |
| 3707 | return fd; |
| 3708 | } |
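
/*
 * Usage sketch (illustration only): the returned fd is read-only and
 * supports pread(); the kvm_stats_header at offset 0 locates the
 * descriptor and data blocks:
 *
 *	int sfd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
 *	struct kvm_stats_header hdr;
 *	pread(sfd, &hdr, sizeof(hdr), 0);
 *	(hdr.desc_offset and hdr.data_offset give the block positions)
 */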
| 3709 | |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3710 | static long kvm_vcpu_ioctl(struct file *filp, |
| 3711 | unsigned int ioctl, unsigned long arg) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3712 | { |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3713 | struct kvm_vcpu *vcpu = filp->private_data; |
Al Viro | 2f366987 | 2007-02-09 16:38:35 +0000 | [diff] [blame] | 3714 | void __user *argp = (void __user *)arg; |
Carsten Otte | 313a3dc | 2007-10-11 19:16:52 +0200 | [diff] [blame] | 3715 | int r; |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3716 | struct kvm_fpu *fpu = NULL; |
| 3717 | struct kvm_sregs *kvm_sregs = NULL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3718 | |
Sean Christopherson | 0b8f117 | 2021-07-02 15:04:23 -0700 | [diff] [blame] | 3719 | if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged) |
Avi Kivity | 6d4e4c4 | 2007-11-21 16:41:05 +0200 | [diff] [blame] | 3720 | return -EIO; |
Avi Kivity | 2122ff5 | 2010-05-13 11:25:04 +0300 | [diff] [blame] | 3721 | |
David Matlack | 2ea75be | 2014-09-19 16:03:25 -0700 | [diff] [blame] | 3722 | if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) |
| 3723 | return -EINVAL; |
| 3724 | |
Avi Kivity | 2122ff5 | 2010-05-13 11:25:04 +0300 | [diff] [blame] | 3725 | /* |
Paolo Bonzini | 5cb0944 | 2017-12-12 17:41:34 +0100 | [diff] [blame] | 3726 | * Some architectures have vcpu ioctls that are asynchronous to vcpu |
| 3727 | * execution; mutex_lock() would break them. |
Avi Kivity | 2122ff5 | 2010-05-13 11:25:04 +0300 | [diff] [blame] | 3728 | */ |
Paolo Bonzini | 5cb0944 | 2017-12-12 17:41:34 +0100 | [diff] [blame] | 3729 | r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); |
| 3730 | if (r != -ENOIOCTLCMD) |
Michael S. Tsirkin | 9fc7744 | 2012-09-16 11:50:30 +0300 | [diff] [blame] | 3731 | return r; |
Avi Kivity | 2122ff5 | 2010-05-13 11:25:04 +0300 | [diff] [blame] | 3732 | |
Christoffer Dall | ec7660c | 2017-12-04 21:35:23 +0100 | [diff] [blame] | 3733 | if (mutex_lock_killable(&vcpu->mutex)) |
| 3734 | return -EINTR; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3735 | switch (ioctl) { |
Christian Borntraeger | 0e4524a | 2017-07-06 14:44:28 +0200 | [diff] [blame] | 3736 | case KVM_RUN: { |
| 3737 | struct pid *oldpid; |
Avi Kivity | f0fe510 | 2007-03-07 13:11:17 +0200 | [diff] [blame] | 3738 | r = -EINVAL; |
| 3739 | if (arg) |
| 3740 | goto out; |
Christian Borntraeger | 0e4524a | 2017-07-06 14:44:28 +0200 | [diff] [blame] | 3741 | oldpid = rcu_access_pointer(vcpu->pid); |
Eric W. Biederman | 71dbc8a | 2017-07-16 21:39:32 -0500 | [diff] [blame] | 3742 | if (unlikely(oldpid != task_pid(current))) { |
Christian Borntraeger | 7a72f7a | 2014-08-05 16:44:14 +0200 | [diff] [blame] | 3743 | /* The thread running this VCPU changed. */ |
Christoffer Dall | bd2a639 | 2018-02-23 17:23:57 +0100 | [diff] [blame] | 3744 | struct pid *newpid; |
Xiubo Li | f95ef0cd | 2015-02-26 14:58:23 +0800 | [diff] [blame] | 3745 | |
Christoffer Dall | bd2a639 | 2018-02-23 17:23:57 +0100 | [diff] [blame] | 3746 | r = kvm_arch_vcpu_run_pid_change(vcpu); |
| 3747 | if (r) |
| 3748 | break; |
| 3749 | |
| 3750 | newpid = get_task_pid(current, PIDTYPE_PID); |
Christian Borntraeger | 7a72f7a | 2014-08-05 16:44:14 +0200 | [diff] [blame] | 3751 | rcu_assign_pointer(vcpu->pid, newpid); |
| 3752 | if (oldpid) |
| 3753 | synchronize_rcu(); |
| 3754 | put_pid(oldpid); |
| 3755 | } |
Tianjia Zhang | 1b94f6f | 2020-04-16 13:10:57 +0800 | [diff] [blame] | 3756 | r = kvm_arch_vcpu_ioctl_run(vcpu); |
Gleb Natapov | 64be500 | 2010-10-24 16:49:08 +0200 | [diff] [blame] | 3757 | trace_kvm_userspace_exit(vcpu->run->exit_reason, r); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3758 | break; |
Christian Borntraeger | 0e4524a | 2017-07-06 14:44:28 +0200 | [diff] [blame] | 3759 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3760 | case KVM_GET_REGS: { |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3761 | struct kvm_regs *kvm_regs; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3762 | |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3763 | r = -ENOMEM; |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 3764 | kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT); |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3765 | if (!kvm_regs) |
| 3766 | goto out; |
| 3767 | r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3768 | if (r) |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3769 | goto out_free1; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3770 | r = -EFAULT; |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3771 | if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) |
| 3772 | goto out_free1; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3773 | r = 0; |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3774 | out_free1: |
| 3775 | kfree(kvm_regs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3776 | break; |
| 3777 | } |
| 3778 | case KVM_SET_REGS: { |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3779 | struct kvm_regs *kvm_regs; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3780 | |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3781 | kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); |
| 3782 | if (IS_ERR(kvm_regs)) { |
| 3783 | r = PTR_ERR(kvm_regs); |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3784 | goto out; |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3785 | } |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3786 | r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3787 | kfree(kvm_regs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3788 | break; |
| 3789 | } |
| 3790 | case KVM_GET_SREGS: { |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 3791 | kvm_sregs = kzalloc(sizeof(struct kvm_sregs), |
| 3792 | GFP_KERNEL_ACCOUNT); |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3793 | r = -ENOMEM; |
| 3794 | if (!kvm_sregs) |
| 3795 | goto out; |
| 3796 | r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3797 | if (r) |
| 3798 | goto out; |
| 3799 | r = -EFAULT; |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3800 | if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3801 | goto out; |
| 3802 | r = 0; |
| 3803 | break; |
| 3804 | } |
| 3805 | case KVM_SET_SREGS: { |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3806 | kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); |
| 3807 | if (IS_ERR(kvm_sregs)) { |
| 3808 | r = PTR_ERR(kvm_sregs); |
Guo Chao | 1859541 | 2012-11-02 18:33:21 +0800 | [diff] [blame] | 3809 | kvm_sregs = NULL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3810 | goto out; |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3811 | } |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3812 | r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3813 | break; |
| 3814 | } |
Marcelo Tosatti | 62d9f0d | 2008-04-11 13:24:45 -0300 | [diff] [blame] | 3815 | case KVM_GET_MP_STATE: { |
| 3816 | struct kvm_mp_state mp_state; |
| 3817 | |
| 3818 | r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); |
| 3819 | if (r) |
| 3820 | goto out; |
| 3821 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3822 | if (copy_to_user(argp, &mp_state, sizeof(mp_state))) |
Marcelo Tosatti | 62d9f0d | 2008-04-11 13:24:45 -0300 | [diff] [blame] | 3823 | goto out; |
| 3824 | r = 0; |
| 3825 | break; |
| 3826 | } |
| 3827 | case KVM_SET_MP_STATE: { |
| 3828 | struct kvm_mp_state mp_state; |
| 3829 | |
| 3830 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3831 | if (copy_from_user(&mp_state, argp, sizeof(mp_state))) |
Marcelo Tosatti | 62d9f0d | 2008-04-11 13:24:45 -0300 | [diff] [blame] | 3832 | goto out; |
| 3833 | r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); |
Marcelo Tosatti | 62d9f0d | 2008-04-11 13:24:45 -0300 | [diff] [blame] | 3834 | break; |
| 3835 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3836 | case KVM_TRANSLATE: { |
| 3837 | struct kvm_translation tr; |
| 3838 | |
| 3839 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3840 | if (copy_from_user(&tr, argp, sizeof(tr))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3841 | goto out; |
Zhang Xiantao | 8b00679 | 2007-11-16 13:05:55 +0800 | [diff] [blame] | 3842 | r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3843 | if (r) |
| 3844 | goto out; |
| 3845 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3846 | if (copy_to_user(argp, &tr, sizeof(tr))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3847 | goto out; |
| 3848 | r = 0; |
| 3849 | break; |
| 3850 | } |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 3851 | case KVM_SET_GUEST_DEBUG: { |
| 3852 | struct kvm_guest_debug dbg; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3853 | |
| 3854 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3855 | if (copy_from_user(&dbg, argp, sizeof(dbg))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3856 | goto out; |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 3857 | r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3858 | break; |
| 3859 | } |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 3860 | case KVM_SET_SIGNAL_MASK: { |
| 3861 | struct kvm_signal_mask __user *sigmask_arg = argp; |
| 3862 | struct kvm_signal_mask kvm_sigmask; |
| 3863 | sigset_t sigset, *p; |
| 3864 | |
| 3865 | p = NULL; |
| 3866 | if (argp) { |
| 3867 | r = -EFAULT; |
| 3868 | if (copy_from_user(&kvm_sigmask, argp, |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3869 | sizeof(kvm_sigmask))) |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 3870 | goto out; |
| 3871 | r = -EINVAL; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3872 | if (kvm_sigmask.len != sizeof(sigset)) |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 3873 | goto out; |
| 3874 | r = -EFAULT; |
| 3875 | if (copy_from_user(&sigset, sigmask_arg->sigset, |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3876 | sizeof(sigset))) |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 3877 | goto out; |
| 3878 | p = &sigset; |
| 3879 | } |
Andi Kleen | 376d41f | 2010-06-10 13:10:47 +0200 | [diff] [blame] | 3880 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 3881 | break; |
| 3882 | } |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 3883 | case KVM_GET_FPU: { |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 3884 | fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT); |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3885 | r = -ENOMEM; |
| 3886 | if (!fpu) |
| 3887 | goto out; |
| 3888 | r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 3889 | if (r) |
| 3890 | goto out; |
| 3891 | r = -EFAULT; |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3892 | if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 3893 | goto out; |
| 3894 | r = 0; |
| 3895 | break; |
| 3896 | } |
| 3897 | case KVM_SET_FPU: { |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3898 | fpu = memdup_user(argp, sizeof(*fpu)); |
| 3899 | if (IS_ERR(fpu)) { |
| 3900 | r = PTR_ERR(fpu); |
Guo Chao | 1859541 | 2012-11-02 18:33:21 +0800 | [diff] [blame] | 3901 | fpu = NULL; |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 3902 | goto out; |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3903 | } |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3904 | r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 3905 | break; |
| 3906 | } |
Jing Zhang | ce55c04 | 2021-06-18 22:27:06 +0000 | [diff] [blame] | 3907 | case KVM_GET_STATS_FD: { |
| 3908 | r = kvm_vcpu_ioctl_get_stats_fd(vcpu); |
| 3909 | break; |
| 3910 | } |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3911 | default: |
Carsten Otte | 313a3dc | 2007-10-11 19:16:52 +0200 | [diff] [blame] | 3912 | r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3913 | } |
| 3914 | out: |
Christoffer Dall | ec7660c | 2017-12-04 21:35:23 +0100 | [diff] [blame] | 3915 | mutex_unlock(&vcpu->mutex); |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3916 | kfree(fpu); |
| 3917 | kfree(kvm_sregs); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3918 | return r; |
| 3919 | } |
| 3920 | |
Christian Borntraeger | de8e5d7 | 2015-02-03 09:35:15 +0100 | [diff] [blame] | 3921 | #ifdef CONFIG_KVM_COMPAT |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 3922 | static long kvm_vcpu_compat_ioctl(struct file *filp, |
| 3923 | unsigned int ioctl, unsigned long arg) |
| 3924 | { |
| 3925 | struct kvm_vcpu *vcpu = filp->private_data; |
| 3926 | void __user *argp = compat_ptr(arg); |
| 3927 | int r; |
| 3928 | |
Sean Christopherson | 0b8f117 | 2021-07-02 15:04:23 -0700 | [diff] [blame] | 3929 | if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged) |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 3930 | return -EIO; |
| 3931 | |
| 3932 | switch (ioctl) { |
| 3933 | case KVM_SET_SIGNAL_MASK: { |
| 3934 | struct kvm_signal_mask __user *sigmask_arg = argp; |
| 3935 | struct kvm_signal_mask kvm_sigmask; |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 3936 | sigset_t sigset; |
| 3937 | |
| 3938 | if (argp) { |
| 3939 | r = -EFAULT; |
| 3940 | if (copy_from_user(&kvm_sigmask, argp, |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3941 | sizeof(kvm_sigmask))) |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 3942 | goto out; |
| 3943 | r = -EINVAL; |
Al Viro | 3968cf6 | 2017-09-03 21:45:17 -0400 | [diff] [blame] | 3944 | if (kvm_sigmask.len != sizeof(compat_sigset_t)) |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 3945 | goto out; |
| 3946 | r = -EFAULT; |
Paolo Bonzini | 1393b4a | 2020-07-02 05:39:31 -0400 | [diff] [blame] | 3947 | if (get_compat_sigset(&sigset, |
| 3948 | (compat_sigset_t __user *)sigmask_arg->sigset)) |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 3949 | goto out; |
Alan Cox | 760a9a3 | 2012-08-22 14:34:11 +0100 | [diff] [blame] | 3950 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); |
| 3951 | } else |
| 3952 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 3953 | break; |
| 3954 | } |
| 3955 | default: |
| 3956 | r = kvm_vcpu_ioctl(filp, ioctl, arg); |
| 3957 | } |
| 3958 | |
| 3959 | out: |
| 3960 | return r; |
| 3961 | } |
| 3962 | #endif |
| 3963 | |
Cédric Le Goater | a1cd3f0 | 2019-04-18 12:39:36 +0200 | [diff] [blame] | 3964 | static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) |
| 3965 | { |
| 3966 | struct kvm_device *dev = filp->private_data; |
| 3967 | |
| 3968 | if (dev->ops->mmap) |
| 3969 | return dev->ops->mmap(dev, vma); |
| 3970 | |
| 3971 | return -ENODEV; |
| 3972 | } |
| 3973 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3974 | static int kvm_device_ioctl_attr(struct kvm_device *dev, |
| 3975 | int (*accessor)(struct kvm_device *dev, |
| 3976 | struct kvm_device_attr *attr), |
| 3977 | unsigned long arg) |
| 3978 | { |
| 3979 | struct kvm_device_attr attr; |
| 3980 | |
| 3981 | if (!accessor) |
| 3982 | return -EPERM; |
| 3983 | |
| 3984 | if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) |
| 3985 | return -EFAULT; |
| 3986 | |
| 3987 | return accessor(dev, &attr); |
| 3988 | } |
| 3989 | |
| 3990 | static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, |
| 3991 | unsigned long arg) |
| 3992 | { |
| 3993 | struct kvm_device *dev = filp->private_data; |
| 3994 | |
Sean Christopherson | 0b8f117 | 2021-07-02 15:04:23 -0700 | [diff] [blame] | 3995 | if (dev->kvm->mm != current->mm || dev->kvm->vm_bugged) |
Sean Christopherson | ddba918 | 2019-02-15 12:48:39 -0800 | [diff] [blame] | 3996 | return -EIO; |
| 3997 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 3998 | switch (ioctl) { |
| 3999 | case KVM_SET_DEVICE_ATTR: |
| 4000 | return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); |
| 4001 | case KVM_GET_DEVICE_ATTR: |
| 4002 | return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); |
| 4003 | case KVM_HAS_DEVICE_ATTR: |
| 4004 | return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); |
| 4005 | default: |
| 4006 | if (dev->ops->ioctl) |
| 4007 | return dev->ops->ioctl(dev, ioctl, arg); |
| 4008 | |
| 4009 | return -ENOTTY; |
| 4010 | } |
| 4011 | } |
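
/*
 * Usage sketch (illustration only): attributes are addressed by a
 * device-defined (group, attr) pair; probing with KVM_HAS_DEVICE_ATTR
 * before setting is the conventional pattern.  Here group, attr_id and
 * val stand in for device-specific values:
 *
 *	struct kvm_device_attr attr = {
 *		.group	= group,
 *		.attr	= attr_id,
 *		.addr	= (__u64)(unsigned long)&val,
 *	};
 *	if (!ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attr))
 *		ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &attr);
 */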
| 4012 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4013 | static int kvm_device_release(struct inode *inode, struct file *filp) |
| 4014 | { |
| 4015 | struct kvm_device *dev = filp->private_data; |
| 4016 | struct kvm *kvm = dev->kvm; |
| 4017 | |
Cédric Le Goater | 2bde9b3 | 2019-04-18 12:39:41 +0200 | [diff] [blame] | 4018 | if (dev->ops->release) { |
| 4019 | mutex_lock(&kvm->lock); |
| 4020 | list_del(&dev->vm_node); |
| 4021 | dev->ops->release(dev); |
| 4022 | mutex_unlock(&kvm->lock); |
| 4023 | } |
| 4024 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4025 | kvm_put_kvm(kvm); |
| 4026 | return 0; |
| 4027 | } |
| 4028 | |
| 4029 | static const struct file_operations kvm_device_fops = { |
| 4030 | .unlocked_ioctl = kvm_device_ioctl, |
| 4031 | .release = kvm_device_release, |
Marc Zyngier | 7ddfd3e | 2018-06-17 10:16:21 +0100 | [diff] [blame] | 4032 | KVM_COMPAT(kvm_device_ioctl), |
Cédric Le Goater | a1cd3f0 | 2019-04-18 12:39:36 +0200 | [diff] [blame] | 4033 | .mmap = kvm_device_mmap, |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4034 | }; |
| 4035 | |
| 4036 | struct kvm_device *kvm_device_from_filp(struct file *filp) |
| 4037 | { |
| 4038 | if (filp->f_op != &kvm_device_fops) |
| 4039 | return NULL; |
| 4040 | |
| 4041 | return filp->private_data; |
| 4042 | } |
| 4043 | |
Steven Price | 8538cb2 | 2019-10-21 16:28:19 +0100 | [diff] [blame] | 4044 | static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 4045 | #ifdef CONFIG_KVM_MPIC |
| 4046 | [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, |
| 4047 | [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, |
| 4048 | #endif |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 4049 | }; |
| 4050 | |
Steven Price | 8538cb2 | 2019-10-21 16:28:19 +0100 | [diff] [blame] | 4051 | int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type) |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 4052 | { |
| 4053 | if (type >= ARRAY_SIZE(kvm_device_ops_table)) |
| 4054 | return -ENOSPC; |
| 4055 | |
| 4056 | if (kvm_device_ops_table[type] != NULL) |
| 4057 | return -EEXIST; |
| 4058 | |
| 4059 | kvm_device_ops_table[type] = ops; |
| 4060 | return 0; |
| 4061 | } |
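
/*
 * Sketch (illustration only): callers register their ops once at init
 * time; the VFIO pseudo device, for instance, does
 *
 *	kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
 */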
| 4062 | |
Wanpeng Li | 571ee1b | 2014-10-09 18:30:08 +0800 | [diff] [blame] | 4063 | void kvm_unregister_device_ops(u32 type) |
| 4064 | { |
| 4065 | if (kvm_device_ops_table[type] != NULL) |
| 4066 | kvm_device_ops_table[type] = NULL; |
| 4067 | } |
| 4068 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4069 | static int kvm_ioctl_create_device(struct kvm *kvm, |
| 4070 | struct kvm_create_device *cd) |
| 4071 | { |
Steven Price | 8538cb2 | 2019-10-21 16:28:19 +0100 | [diff] [blame] | 4072 | const struct kvm_device_ops *ops = NULL; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4073 | struct kvm_device *dev; |
| 4074 | bool test = cd->flags & KVM_CREATE_DEVICE_TEST; |
Paolo Bonzini | 1d487e9 | 2019-04-11 11:16:47 +0200 | [diff] [blame] | 4075 | int type; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4076 | int ret; |
| 4077 | |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 4078 | if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4079 | return -ENODEV; |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 4080 | |
Paolo Bonzini | 1d487e9 | 2019-04-11 11:16:47 +0200 | [diff] [blame] | 4081 | type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); |
| 4082 | ops = kvm_device_ops_table[type]; |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 4083 | if (ops == NULL) |
| 4084 | return -ENODEV; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4085 | |
| 4086 | if (test) |
| 4087 | return 0; |
| 4088 | |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 4089 | dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4090 | if (!dev) |
| 4091 | return -ENOMEM; |
| 4092 | |
| 4093 | dev->ops = ops; |
| 4094 | dev->kvm = kvm; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4095 | |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 4096 | mutex_lock(&kvm->lock); |
Paolo Bonzini | 1d487e9 | 2019-04-11 11:16:47 +0200 | [diff] [blame] | 4097 | ret = ops->create(dev, type); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4098 | if (ret < 0) { |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 4099 | mutex_unlock(&kvm->lock); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4100 | kfree(dev); |
| 4101 | return ret; |
| 4102 | } |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 4103 | list_add(&dev->vm_node, &kvm->devices); |
| 4104 | mutex_unlock(&kvm->lock); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4105 | |
Christoffer Dall | 023e9fd | 2016-08-09 19:13:00 +0200 | [diff] [blame] | 4106 | if (ops->init) |
| 4107 | ops->init(dev); |
| 4108 | |
Jann Horn | cfa3938 | 2019-01-26 01:54:33 +0100 | [diff] [blame] | 4109 | kvm_get_kvm(kvm); |
Yann Droneaud | 24009b0 | 2013-08-24 22:14:07 +0200 | [diff] [blame] | 4110 | ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4111 | if (ret < 0) { |
Sean Christopherson | 149487b | 2019-10-21 15:58:42 -0700 | [diff] [blame] | 4112 | kvm_put_kvm_no_destroy(kvm); |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 4113 | mutex_lock(&kvm->lock); |
| 4114 | list_del(&dev->vm_node); |
| 4115 | mutex_unlock(&kvm->lock); |
Dan Carpenter | a0f1d21 | 2016-11-30 22:21:05 +0300 | [diff] [blame] | 4116 | ops->destroy(dev); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4117 | return ret; |
| 4118 | } |
| 4119 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4120 | cd->fd = ret; |
| 4121 | return 0; |
| 4122 | } |
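
/*
 * Usage sketch (illustration only): KVM_CREATE_DEVICE_TEST probes for
 * support without instantiating anything; clearing the flag creates the
 * device and returns its fd in cd.fd:
 *
 *	struct kvm_create_device cd = {
 *		.type	= KVM_DEV_TYPE_VFIO,
 *		.flags	= KVM_CREATE_DEVICE_TEST,
 *	};
 *	if (!ioctl(vm_fd, KVM_CREATE_DEVICE, &cd)) {
 *		cd.flags = 0;
 *		ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	(cd.fd now usable)
 *	}
 */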
| 4123 | |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4124 | static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) |
| 4125 | { |
| 4126 | switch (arg) { |
| 4127 | case KVM_CAP_USER_MEMORY: |
| 4128 | case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: |
| 4129 | case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4130 | case KVM_CAP_INTERNAL_ERROR_DATA: |
| 4131 | #ifdef CONFIG_HAVE_KVM_MSI |
| 4132 | case KVM_CAP_SIGNAL_MSI: |
| 4133 | #endif |
Paul Mackerras | 297e210 | 2014-06-30 20:51:13 +1000 | [diff] [blame] | 4134 | #ifdef CONFIG_HAVE_KVM_IRQFD |
Paolo Bonzini | dc9be0f | 2015-03-05 11:54:46 +0100 | [diff] [blame] | 4135 | case KVM_CAP_IRQFD: |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4136 | case KVM_CAP_IRQFD_RESAMPLE: |
| 4137 | #endif |
Jason Wang | e9ea506 | 2015-09-15 14:41:59 +0800 | [diff] [blame] | 4138 | case KVM_CAP_IOEVENTFD_ANY_LENGTH: |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4139 | case KVM_CAP_CHECK_EXTENSION_VM: |
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 4140 | case KVM_CAP_ENABLE_CAP_VM: |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 4141 | case KVM_CAP_HALT_POLL: |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4142 | return 1; |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 4143 | #ifdef CONFIG_KVM_MMIO |
Paolo Bonzini | 3042255 | 2017-03-31 13:53:22 +0200 | [diff] [blame] | 4144 | case KVM_CAP_COALESCED_MMIO: |
| 4145 | return KVM_COALESCED_MMIO_PAGE_OFFSET; |
Peng Hao | 0804c84 | 2018-10-14 07:09:55 +0800 | [diff] [blame] | 4146 | case KVM_CAP_COALESCED_PIO: |
| 4147 | return 1; |
Paolo Bonzini | 3042255 | 2017-03-31 13:53:22 +0200 | [diff] [blame] | 4148 | #endif |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 4149 | #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
| 4150 | case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: |
| 4151 | return KVM_DIRTY_LOG_MANUAL_CAPS; |
| 4152 | #endif |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4153 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
| 4154 | case KVM_CAP_IRQ_ROUTING: |
| 4155 | return KVM_MAX_IRQ_ROUTES; |
| 4156 | #endif |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 4157 | #if KVM_ADDRESS_SPACE_NUM > 1 |
| 4158 | case KVM_CAP_MULTI_ADDRESS_SPACE: |
| 4159 | return KVM_ADDRESS_SPACE_NUM; |
| 4160 | #endif |
Paolo Bonzini | c110ae5 | 2019-03-28 17:24:03 +0100 | [diff] [blame] | 4161 | case KVM_CAP_NR_MEMSLOTS: |
| 4162 | return KVM_USER_MEM_SLOTS; |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 4163 | case KVM_CAP_DIRTY_LOG_RING: |
| 4164 | #if KVM_DIRTY_LOG_PAGE_OFFSET > 0 |
| 4165 | return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); |
| 4166 | #else |
| 4167 | return 0; |
| 4168 | #endif |
Jing Zhang | ce55c04 | 2021-06-18 22:27:06 +0000 | [diff] [blame] | 4169 | case KVM_CAP_BINARY_STATS_FD: |
| 4170 | return 1; |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4171 | default: |
| 4172 | break; |
| 4173 | } |
| 4174 | return kvm_vm_ioctl_check_extension(kvm, arg); |
| 4175 | } |
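
/*
 * Usage sketch (illustration only): querying on a VM fd rather than on
 * /dev/kvm lets the answer be VM-specific, which is what
 * KVM_CAP_CHECK_EXTENSION_VM advertises:
 *
 *	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MULTI_ADDRESS_SPACE) > 1)
 *		... this VM exposes more than one address space ...
 */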
| 4176 | |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 4177 | static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) |
| 4178 | { |
| 4179 | int r; |
| 4180 | |
| 4181 | if (!KVM_DIRTY_LOG_PAGE_OFFSET) |
| 4182 | return -EINVAL; |
| 4183 | |
| 4184 | /* the size must be a power of 2 */ |
| 4185 | if (!size || (size & (size - 1))) |
| 4186 | return -EINVAL; |
| 4187 | |
| 4188 | /* Must hold at least the reserved entries and cover at least a page */ |
| 4189 | if (size < kvm_dirty_ring_get_rsvd_entries() * |
| 4190 | sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE) |
| 4191 | return -EINVAL; |
| 4192 | |
| 4193 | if (size > KVM_DIRTY_RING_MAX_ENTRIES * |
| 4194 | sizeof(struct kvm_dirty_gfn)) |
| 4195 | return -E2BIG; |
| 4196 | |
| 4197 | /* The ring size may only be set once */ |
| 4198 | if (kvm->dirty_ring_size) |
| 4199 | return -EINVAL; |
| 4200 | |
| 4201 | mutex_lock(&kvm->lock); |
| 4202 | |
| 4203 | if (kvm->created_vcpus) { |
| 4204 | /* The size cannot be changed once vCPUs have been created */ |
| 4205 | r = -EINVAL; |
| 4206 | } else { |
| 4207 | kvm->dirty_ring_size = size; |
| 4208 | r = 0; |
| 4209 | } |
| 4210 | |
| 4211 | mutex_unlock(&kvm->lock); |
| 4212 | return r; |
| 4213 | } |
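
/*
 * Usage sketch (illustration only): the size is in bytes and, per the
 * checks above, must be a power of two, cover at least a page, and be
 * configured before the first vCPU exists:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap	 = KVM_CAP_DIRTY_LOG_RING,
 *		.args[0] = 4096 * sizeof(struct kvm_dirty_gfn),
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	(before any KVM_CREATE_VCPU)
 */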
| 4214 | |
| 4215 | static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) |
| 4216 | { |
| 4217 | int i; |
| 4218 | struct kvm_vcpu *vcpu; |
| 4219 | int cleared = 0; |
| 4220 | |
| 4221 | if (!kvm->dirty_ring_size) |
| 4222 | return -EINVAL; |
| 4223 | |
| 4224 | mutex_lock(&kvm->slots_lock); |
| 4225 | |
| 4226 | kvm_for_each_vcpu(i, vcpu, kvm) |
| 4227 | cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring); |
| 4228 | |
| 4229 | mutex_unlock(&kvm->slots_lock); |
| 4230 | |
| 4231 | if (cleared) |
| 4232 | kvm_flush_remote_tlbs(kvm); |
| 4233 | |
| 4234 | return cleared; |
| 4235 | } |
| 4236 | |
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 4237 | int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, |
| 4238 | struct kvm_enable_cap *cap) |
| 4239 | { |
| 4240 | return -EINVAL; |
| 4241 | } |
| 4242 | |
| 4243 | static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, |
| 4244 | struct kvm_enable_cap *cap) |
| 4245 | { |
| 4246 | switch (cap->cap) { |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 4247 | #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 4248 | case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: { |
| 4249 | u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE; |
| 4250 | |
| 4251 | if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE) |
| 4252 | allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS; |
| 4253 | |
| 4254 | if (cap->flags || (cap->args[0] & ~allowed_options)) |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 4255 | return -EINVAL; |
| 4256 | kvm->manual_dirty_log_protect = cap->args[0]; |
| 4257 | return 0; |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 4258 | } |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 4259 | #endif |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 4260 | case KVM_CAP_HALT_POLL: { |
| 4261 | if (cap->flags || cap->args[0] != (unsigned int)cap->args[0]) |
| 4262 | return -EINVAL; |
| 4263 | |
| 4264 | kvm->max_halt_poll_ns = cap->args[0]; |
| 4265 | return 0; |
| 4266 | } |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 4267 | case KVM_CAP_DIRTY_LOG_RING: |
| 4268 | return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); |
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 4269 | default: |
| 4270 | return kvm_vm_ioctl_enable_cap(kvm, cap); |
| 4271 | } |
| 4272 | } |
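
/*
 * Usage sketch (illustration only): KVM_CAP_HALT_POLL carries the new
 * per-VM polling limit, in nanoseconds, in args[0]; flags must be zero:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap	 = KVM_CAP_HALT_POLL,
 *		.args[0] = 200000,	(poll for up to 200us before halting)
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */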
| 4273 | |
Jing Zhang | fcfe1ba | 2021-06-18 22:27:05 +0000 | [diff] [blame] | 4274 | static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer, |
| 4275 | size_t size, loff_t *offset) |
| 4276 | { |
| 4277 | struct kvm *kvm = file->private_data; |
| 4278 | |
| 4279 | return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header, |
| 4280 | &kvm_vm_stats_desc[0], &kvm->stat, |
| 4281 | sizeof(kvm->stat), user_buffer, size, offset); |
| 4282 | } |
| 4283 | |
| 4284 | static const struct file_operations kvm_vm_stats_fops = { |
| 4285 | .read = kvm_vm_stats_read, |
| 4286 | .llseek = noop_llseek, |
| 4287 | }; |
| 4288 | |
| 4289 | static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) |
| 4290 | { |
| 4291 | int fd; |
| 4292 | struct file *file; |
| 4293 | |
| 4294 | fd = get_unused_fd_flags(O_CLOEXEC); |
| 4295 | if (fd < 0) |
| 4296 | return fd; |
| 4297 | |
| 4298 | file = anon_inode_getfile("kvm-vm-stats", |
| 4299 | &kvm_vm_stats_fops, kvm, O_RDONLY); |
| 4300 | if (IS_ERR(file)) { |
| 4301 | put_unused_fd(fd); |
| 4302 | return PTR_ERR(file); |
| 4303 | } |
| 4304 | file->f_mode |= FMODE_PREAD; |
| 4305 | fd_install(fd, file); |
| 4306 | |
| 4307 | return fd; |
| 4308 | } |
| 4309 | |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 4310 | static long kvm_vm_ioctl(struct file *filp, |
| 4311 | unsigned int ioctl, unsigned long arg) |
| 4312 | { |
| 4313 | struct kvm *kvm = filp->private_data; |
| 4314 | void __user *argp = (void __user *)arg; |
Carsten Otte | 1fe779f | 2007-10-29 16:08:35 +0100 | [diff] [blame] | 4315 | int r; |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 4316 | |
Sean Christopherson | 0b8f117 | 2021-07-02 15:04:23 -0700 | [diff] [blame] | 4317 | if (kvm->mm != current->mm || kvm->vm_bugged) |
Avi Kivity | 6d4e4c4 | 2007-11-21 16:41:05 +0200 | [diff] [blame] | 4318 | return -EIO; |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 4319 | switch (ioctl) { |
| 4320 | case KVM_CREATE_VCPU: |
| 4321 | r = kvm_vm_ioctl_create_vcpu(kvm, arg); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 4322 | break; |
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 4323 | case KVM_ENABLE_CAP: { |
| 4324 | struct kvm_enable_cap cap; |
| 4325 | |
| 4326 | r = -EFAULT; |
| 4327 | if (copy_from_user(&cap, argp, sizeof(cap))) |
| 4328 | goto out; |
| 4329 | r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); |
| 4330 | break; |
| 4331 | } |
Izik Eidus | 6fc138d | 2007-10-09 19:20:39 +0200 | [diff] [blame] | 4332 | case KVM_SET_USER_MEMORY_REGION: { |
| 4333 | struct kvm_userspace_memory_region kvm_userspace_mem; |
| 4334 | |
| 4335 | r = -EFAULT; |
| 4336 | if (copy_from_user(&kvm_userspace_mem, argp, |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4337 | sizeof(kvm_userspace_mem))) |
Izik Eidus | 6fc138d | 2007-10-09 19:20:39 +0200 | [diff] [blame] | 4338 | goto out; |
| 4339 | |
Takuya Yoshikawa | 47ae31e | 2013-02-27 19:43:00 +0900 | [diff] [blame] | 4340 | r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4341 | break; |
| 4342 | } |
| 4343 | case KVM_GET_DIRTY_LOG: { |
| 4344 | struct kvm_dirty_log log; |
| 4345 | |
| 4346 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4347 | if (copy_from_user(&log, argp, sizeof(log))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4348 | goto out; |
Avi Kivity | 2c6f5df | 2007-02-20 18:27:58 +0200 | [diff] [blame] | 4349 | r = kvm_vm_ioctl_get_dirty_log(kvm, &log); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4350 | break; |
| 4351 | } |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 4352 | #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
| 4353 | case KVM_CLEAR_DIRTY_LOG: { |
| 4354 | struct kvm_clear_dirty_log log; |
| 4355 | |
| 4356 | r = -EFAULT; |
| 4357 | if (copy_from_user(&log, argp, sizeof(log))) |
| 4358 | goto out; |
| 4359 | r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); |
| 4360 | break; |
| 4361 | } |
| 4362 | #endif |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 4363 | #ifdef CONFIG_KVM_MMIO |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4364 | case KVM_REGISTER_COALESCED_MMIO: { |
| 4365 | struct kvm_coalesced_mmio_zone zone; |
Xiubo Li | f95ef0cd | 2015-02-26 14:58:23 +0800 | [diff] [blame] | 4366 | |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4367 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4368 | if (copy_from_user(&zone, argp, sizeof(zone))) |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4369 | goto out; |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4370 | r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4371 | break; |
| 4372 | } |
| 4373 | case KVM_UNREGISTER_COALESCED_MMIO: { |
| 4374 | struct kvm_coalesced_mmio_zone zone; |
Xiubo Li | f95ef0cd | 2015-02-26 14:58:23 +0800 | [diff] [blame] | 4375 | |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4376 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4377 | if (copy_from_user(&zone, argp, sizeof(zone))) |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4378 | goto out; |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4379 | r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4380 | break; |
| 4381 | } |
| 4382 | #endif |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 4383 | case KVM_IRQFD: { |
| 4384 | struct kvm_irqfd data; |
| 4385 | |
| 4386 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4387 | if (copy_from_user(&data, argp, sizeof(data))) |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 4388 | goto out; |
Alex Williamson | d4db293 | 2012-06-29 09:56:08 -0600 | [diff] [blame] | 4389 | r = kvm_irqfd(kvm, &data); |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 4390 | break; |
| 4391 | } |
Gregory Haskins | d34e6b1 | 2009-07-07 17:08:49 -0400 | [diff] [blame] | 4392 | case KVM_IOEVENTFD: { |
| 4393 | struct kvm_ioeventfd data; |
| 4394 | |
| 4395 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4396 | if (copy_from_user(&data, argp, sizeof(data))) |
Gregory Haskins | d34e6b1 | 2009-07-07 17:08:49 -0400 | [diff] [blame] | 4397 | goto out; |
| 4398 | r = kvm_ioeventfd(kvm, &data); |
| 4399 | break; |
| 4400 | } |
Jan Kiszka | 07975ad | 2012-03-29 21:14:12 +0200 | [diff] [blame] | 4401 | #ifdef CONFIG_HAVE_KVM_MSI |
| 4402 | case KVM_SIGNAL_MSI: { |
| 4403 | struct kvm_msi msi; |
| 4404 | |
| 4405 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4406 | if (copy_from_user(&msi, argp, sizeof(msi))) |
Jan Kiszka | 07975ad | 2012-03-29 21:14:12 +0200 | [diff] [blame] | 4407 | goto out; |
| 4408 | r = kvm_send_userspace_msi(kvm, &msi); |
| 4409 | break; |
| 4410 | } |
| 4411 | #endif |
Christoffer Dall | 23d43cf | 2012-07-24 08:51:20 -0400 | [diff] [blame] | 4412 | #ifdef __KVM_HAVE_IRQ_LINE |
| 4413 | case KVM_IRQ_LINE_STATUS: |
| 4414 | case KVM_IRQ_LINE: { |
| 4415 | struct kvm_irq_level irq_event; |
| 4416 | |
| 4417 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4418 | if (copy_from_user(&irq_event, argp, sizeof(irq_event))) |
Christoffer Dall | 23d43cf | 2012-07-24 08:51:20 -0400 | [diff] [blame] | 4419 | goto out; |
| 4420 | |
Yang Zhang | aa2fbe6 | 2013-04-11 19:21:40 +0800 | [diff] [blame] | 4421 | r = kvm_vm_ioctl_irq_line(kvm, &irq_event, |
| 4422 | ioctl == KVM_IRQ_LINE_STATUS); |
Christoffer Dall | 23d43cf | 2012-07-24 08:51:20 -0400 | [diff] [blame] | 4423 | if (r) |
| 4424 | goto out; |
| 4425 | |
| 4426 | r = -EFAULT; |
| 4427 | if (ioctl == KVM_IRQ_LINE_STATUS) { |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4428 | if (copy_to_user(argp, &irq_event, sizeof(irq_event))) |
Christoffer Dall | 23d43cf | 2012-07-24 08:51:20 -0400 | [diff] [blame] | 4429 | goto out; |
| 4430 | } |
| 4431 | |
| 4432 | r = 0; |
| 4433 | break; |
| 4434 | } |
| 4435 | #endif |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 4436 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
| 4437 | case KVM_SET_GSI_ROUTING: { |
| 4438 | struct kvm_irq_routing routing; |
| 4439 | struct kvm_irq_routing __user *urouting; |
Paolo Bonzini | f8c1b85 | 2016-06-01 14:09:22 +0200 | [diff] [blame] | 4440 | struct kvm_irq_routing_entry *entries = NULL; |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 4441 | |
| 4442 | r = -EFAULT; |
| 4443 | if (copy_from_user(&routing, argp, sizeof(routing))) |
| 4444 | goto out; |
| 4445 | r = -EINVAL; |
David Hildenbrand | 5c0aea0 | 2017-04-28 17:06:20 +0200 | [diff] [blame] | 4446 | if (!kvm_arch_can_set_irq_routing(kvm)) |
| 4447 | goto out; |
Xiubo Li | caf1ff2 | 2016-06-15 18:00:33 +0800 | [diff] [blame] | 4448 | if (routing.nr > KVM_MAX_IRQ_ROUTES) |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 4449 | goto out; |
| 4450 | if (routing.flags) |
| 4451 | goto out; |
Paolo Bonzini | f8c1b85 | 2016-06-01 14:09:22 +0200 | [diff] [blame] | 4452 | if (routing.nr) { |
Paolo Bonzini | f8c1b85 | 2016-06-01 14:09:22 +0200 | [diff] [blame] | 4453 | urouting = argp; |
Denis Efremov | 7ec28e2 | 2020-06-03 13:11:31 +0300 | [diff] [blame] | 4454 | entries = vmemdup_user(urouting->entries, |
| 4455 | array_size(sizeof(*entries), |
| 4456 | routing.nr)); |
| 4457 | if (IS_ERR(entries)) { |
| 4458 | r = PTR_ERR(entries); |
| 4459 | goto out; |
| 4460 | } |
Paolo Bonzini | f8c1b85 | 2016-06-01 14:09:22 +0200 | [diff] [blame] | 4461 | } |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 4462 | r = kvm_set_irq_routing(kvm, entries, routing.nr, |
| 4463 | routing.flags); |
Denis Efremov | 7ec28e2 | 2020-06-03 13:11:31 +0300 | [diff] [blame] | 4464 | kvfree(entries); |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 4465 | break; |
| 4466 | } |
| 4467 | #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4468 | case KVM_CREATE_DEVICE: { |
| 4469 | struct kvm_create_device cd; |
| 4470 | |
| 4471 | r = -EFAULT; |
| 4472 | if (copy_from_user(&cd, argp, sizeof(cd))) |
| 4473 | goto out; |
| 4474 | |
| 4475 | r = kvm_ioctl_create_device(kvm, &cd); |
| 4476 | if (r) |
| 4477 | goto out; |
| 4478 | |
| 4479 | r = -EFAULT; |
| 4480 | if (copy_to_user(argp, &cd, sizeof(cd))) |
| 4481 | goto out; |
| 4482 | |
| 4483 | r = 0; |
| 4484 | break; |
| 4485 | } |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4486 | case KVM_CHECK_EXTENSION: |
| 4487 | r = kvm_vm_ioctl_check_extension_generic(kvm, arg); |
| 4488 | break; |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 4489 | case KVM_RESET_DIRTY_RINGS: |
| 4490 | r = kvm_vm_ioctl_reset_dirty_pages(kvm); |
| 4491 | break; |
Jing Zhang | fcfe1ba | 2021-06-18 22:27:05 +0000 | [diff] [blame] | 4492 | case KVM_GET_STATS_FD: |
| 4493 | r = kvm_vm_ioctl_get_stats_fd(kvm); |
| 4494 | break; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4495 | default: |
Carsten Otte | 1fe779f | 2007-10-29 16:08:35 +0100 | [diff] [blame] | 4496 | r = kvm_arch_vm_ioctl(filp, ioctl, arg); |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4497 | } |
| 4498 | out: |
| 4499 | return r; |
| 4500 | } |
| 4501 | |
Christian Borntraeger | de8e5d7 | 2015-02-03 09:35:15 +0100 | [diff] [blame] | 4502 | #ifdef CONFIG_KVM_COMPAT |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4503 | struct compat_kvm_dirty_log { |
| 4504 | __u32 slot; |
| 4505 | __u32 padding1; |
| 4506 | union { |
| 4507 | compat_uptr_t dirty_bitmap; /* one bit per page */ |
| 4508 | __u64 padding2; |
| 4509 | }; |
| 4510 | }; |
| 4511 | |
Paolo Bonzini | 8750f9b | 2021-07-27 08:43:10 -0400 | [diff] [blame] | 4512 | struct compat_kvm_clear_dirty_log { |
| 4513 | __u32 slot; |
| 4514 | __u32 num_pages; |
| 4515 | __u64 first_page; |
| 4516 | union { |
| 4517 | compat_uptr_t dirty_bitmap; /* one bit per page */ |
| 4518 | __u64 padding2; |
| 4519 | }; |
| 4520 | }; |
| 4521 | |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4522 | static long kvm_vm_compat_ioctl(struct file *filp, |
| 4523 | unsigned int ioctl, unsigned long arg) |
| 4524 | { |
| 4525 | struct kvm *kvm = filp->private_data; |
| 4526 | int r; |
| 4527 | |
Sean Christopherson | 0b8f117 | 2021-07-02 15:04:23 -0700 | [diff] [blame] | 4528 | if (kvm->mm != current->mm || kvm->vm_bugged) |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4529 | return -EIO; |
| 4530 | switch (ioctl) { |
Paolo Bonzini | 8750f9b | 2021-07-27 08:43:10 -0400 | [diff] [blame] | 4531 | #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
| 4532 | case KVM_CLEAR_DIRTY_LOG: { |
| 4533 | struct compat_kvm_clear_dirty_log compat_log; |
| 4534 | struct kvm_clear_dirty_log log; |
| 4535 | |
| 4536 | if (copy_from_user(&compat_log, (void __user *)arg, |
| 4537 | sizeof(compat_log))) |
| 4538 | return -EFAULT; |
| 4539 | log.slot = compat_log.slot; |
| 4540 | log.num_pages = compat_log.num_pages; |
| 4541 | log.first_page = compat_log.first_page; |
| 4542 | log.padding2 = compat_log.padding2; |
| 4543 | log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); |
| 4544 | |
| 4545 | r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); |
| 4546 | break; |
| 4547 | } |
| 4548 | #endif |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4549 | case KVM_GET_DIRTY_LOG: { |
| 4550 | struct compat_kvm_dirty_log compat_log; |
| 4551 | struct kvm_dirty_log log; |
| 4552 | |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4553 | if (copy_from_user(&compat_log, (void __user *)arg, |
| 4554 | sizeof(compat_log))) |
Markus Elfring | f6a3b16 | 2017-01-22 11:30:21 +0100 | [diff] [blame] | 4555 | return -EFAULT; |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4556 | log.slot = compat_log.slot; |
| 4557 | log.padding1 = compat_log.padding1; |
| 4558 | log.padding2 = compat_log.padding2; |
| 4559 | log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); |
| 4560 | |
| 4561 | r = kvm_vm_ioctl_get_dirty_log(kvm, &log); |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4562 | break; |
| 4563 | } |
| 4564 | default: |
| 4565 | r = kvm_vm_ioctl(filp, ioctl, arg); |
| 4566 | } |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4567 | return r; |
| 4568 | } |
| 4569 | #endif |
| 4570 | |
Christian Borntraeger | 3d3aab1 | 2008-12-02 11:17:32 +0100 | [diff] [blame] | 4571 | static struct file_operations kvm_vm_fops = { |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4572 | .release = kvm_vm_release, |
| 4573 | .unlocked_ioctl = kvm_vm_ioctl, |
Arnd Bergmann | 6038f37 | 2010-08-15 18:52:59 +0200 | [diff] [blame] | 4574 | .llseek = noop_llseek, |
Marc Zyngier | 7ddfd3e | 2018-06-17 10:16:21 +0100 | [diff] [blame] | 4575 | KVM_COMPAT(kvm_vm_compat_ioctl), |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4576 | }; |
| 4577 | |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 4578 | bool file_is_kvm(struct file *file) |
| 4579 | { |
| 4580 | return file && file->f_op == &kvm_vm_fops; |
| 4581 | } |
| 4582 | EXPORT_SYMBOL_GPL(file_is_kvm); |
| 4583 | |
Carsten Otte | e08b963 | 2012-01-04 10:25:20 +0100 | [diff] [blame] | 4584 | static int kvm_dev_ioctl_create_vm(unsigned long type) |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4585 | { |
Heiko Carstens | aac8763 | 2010-10-27 17:22:10 +0200 | [diff] [blame] | 4586 | int r; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4587 | struct kvm *kvm; |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4588 | struct file *file; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4589 | |
Carsten Otte | e08b963 | 2012-01-04 10:25:20 +0100 | [diff] [blame] | 4590 | kvm = kvm_create_vm(type); |
Avi Kivity | d6d2816 | 2007-06-28 08:38:16 -0400 | [diff] [blame] | 4591 | if (IS_ERR(kvm)) |
| 4592 | return PTR_ERR(kvm); |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 4593 | #ifdef CONFIG_KVM_MMIO |
Takuya Yoshikawa | 6ce5a09 | 2010-03-15 22:13:30 +0900 | [diff] [blame] | 4594 | r = kvm_coalesced_mmio_init(kvm); |
Markus Elfring | 7858833 | 2017-11-21 13:40:17 +0100 | [diff] [blame] | 4595 | if (r < 0) |
| 4596 | goto put_kvm; |
Takuya Yoshikawa | 6ce5a09 | 2010-03-15 22:13:30 +0900 | [diff] [blame] | 4597 | #endif |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4598 | r = get_unused_fd_flags(O_CLOEXEC); |
Markus Elfring | 7858833 | 2017-11-21 13:40:17 +0100 | [diff] [blame] | 4599 | if (r < 0) |
| 4600 | goto put_kvm; |
| 4601 | |
Jing Zhang | fcfe1ba | 2021-06-18 22:27:05 +0000 | [diff] [blame] | 4602 | snprintf(kvm->stats_id, sizeof(kvm->stats_id), |
| 4603 | "kvm-%d", task_pid_nr(current)); |
| 4604 | |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4605 | file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); |
| 4606 | if (IS_ERR(file)) { |
| 4607 | put_unused_fd(r); |
Markus Elfring | 7858833 | 2017-11-21 13:40:17 +0100 | [diff] [blame] | 4608 | r = PTR_ERR(file); |
| 4609 | goto put_kvm; |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4610 | } |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4611 | |
Paolo Bonzini | 525df86 | 2017-06-27 15:45:09 +0200 | [diff] [blame] | 4612 | /* |
| 4613 | * Don't call kvm_put_kvm() from this point on; file->f_op is
| 4614 | * already set, with ->release() being kvm_vm_release(). On error
| 4615 | * paths, ->release() is invoked by the final fput(file) and takes
| 4616 | * care of doing kvm_put_kvm(kvm).
| 4617 | */ |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4618 | if (kvm_create_vm_debugfs(kvm, r) < 0) { |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4619 | put_unused_fd(r); |
| 4620 | fput(file); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4621 | return -ENOMEM; |
| 4622 | } |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 4623 | kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4624 | |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4625 | fd_install(r, file); |
Heiko Carstens | aac8763 | 2010-10-27 17:22:10 +0200 | [diff] [blame] | 4626 | return r; |
Markus Elfring | 7858833 | 2017-11-21 13:40:17 +0100 | [diff] [blame] | 4627 | |
| 4628 | put_kvm: |
| 4629 | kvm_put_kvm(kvm); |
| 4630 | return r; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4631 | } |
| 4632 | |
| 4633 | static long kvm_dev_ioctl(struct file *filp, |
| 4634 | unsigned int ioctl, unsigned long arg) |
| 4635 | { |
Avi Kivity | 07c45a3 | 2007-03-07 13:05:38 +0200 | [diff] [blame] | 4636 | long r = -EINVAL; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4637 | |
| 4638 | switch (ioctl) { |
| 4639 | case KVM_GET_API_VERSION: |
Avi Kivity | f0fe510 | 2007-03-07 13:11:17 +0200 | [diff] [blame] | 4640 | if (arg) |
| 4641 | goto out; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4642 | r = KVM_API_VERSION; |
| 4643 | break; |
| 4644 | case KVM_CREATE_VM: |
Carsten Otte | e08b963 | 2012-01-04 10:25:20 +0100 | [diff] [blame] | 4645 | r = kvm_dev_ioctl_create_vm(arg); |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4646 | break; |
Zhang Xiantao | 018d00d | 2007-11-15 23:07:47 +0800 | [diff] [blame] | 4647 | case KVM_CHECK_EXTENSION: |
Alexander Graf | 784aa3d | 2014-07-14 18:27:35 +0200 | [diff] [blame] | 4648 | r = kvm_vm_ioctl_check_extension_generic(NULL, arg); |
Avi Kivity | 5d308f4 | 2007-03-01 17:56:20 +0200 | [diff] [blame] | 4649 | break; |
Avi Kivity | 07c45a3 | 2007-03-07 13:05:38 +0200 | [diff] [blame] | 4650 | case KVM_GET_VCPU_MMAP_SIZE: |
Avi Kivity | 07c45a3 | 2007-03-07 13:05:38 +0200 | [diff] [blame] | 4651 | if (arg) |
| 4652 | goto out; |
Avi Kivity | adb1ff4 | 2008-01-24 15:13:08 +0200 | [diff] [blame] | 4653 | r = PAGE_SIZE; /* struct kvm_run */ |
| 4654 | #ifdef CONFIG_X86 |
| 4655 | r += PAGE_SIZE; /* pio data page */ |
| 4656 | #endif |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 4657 | #ifdef CONFIG_KVM_MMIO |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4658 | r += PAGE_SIZE; /* coalesced mmio ring page */ |
| 4659 | #endif |
Avi Kivity | 07c45a3 | 2007-03-07 13:05:38 +0200 | [diff] [blame] | 4660 | break; |
Feng(Eric) Liu | d4c9ff2 | 2008-04-10 08:47:53 -0400 | [diff] [blame] | 4661 | case KVM_TRACE_ENABLE: |
| 4662 | case KVM_TRACE_PAUSE: |
| 4663 | case KVM_TRACE_DISABLE: |
Marcelo Tosatti | 2023a29 | 2009-06-18 11:47:28 -0300 | [diff] [blame] | 4664 | r = -EOPNOTSUPP; |
Feng(Eric) Liu | d4c9ff2 | 2008-04-10 08:47:53 -0400 | [diff] [blame] | 4665 | break; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4666 | default: |
Carsten Otte | 043405e | 2007-10-10 17:16:19 +0200 | [diff] [blame] | 4667 | return kvm_arch_dev_ioctl(filp, ioctl, arg); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4668 | } |
| 4669 | out: |
| 4670 | return r; |
| 4671 | } |
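/*
 * For reference, a hedged sketch of how userspace typically drives the
 * /dev/kvm ioctls handled above (error handling omitted; the API version
 * is expected to be 12):
 *
 *	int sys_fd = open("/dev/kvm", O_RDWR);
 *	int version = ioctl(sys_fd, KVM_GET_API_VERSION, 0);
 *	int vm_fd = ioctl(sys_fd, KVM_CREATE_VM, 0);	// type 0, the default
 *	long mmap_size = ioctl(sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 */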
| 4672 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4673 | static struct file_operations kvm_chardev_ops = { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4674 | .unlocked_ioctl = kvm_dev_ioctl, |
Arnd Bergmann | 6038f37 | 2010-08-15 18:52:59 +0200 | [diff] [blame] | 4675 | .llseek = noop_llseek, |
Marc Zyngier | 7ddfd3e | 2018-06-17 10:16:21 +0100 | [diff] [blame] | 4676 | KVM_COMPAT(kvm_dev_ioctl), |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4677 | }; |
| 4678 | |
| 4679 | static struct miscdevice kvm_dev = { |
Avi Kivity | bbe4432 | 2007-03-04 13:27:36 +0200 | [diff] [blame] | 4680 | KVM_MINOR, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4681 | "kvm", |
| 4682 | &kvm_chardev_ops, |
| 4683 | }; |
| 4684 | |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4685 | static void hardware_enable_nolock(void *junk) |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4686 | { |
| 4687 | int cpu = raw_smp_processor_id(); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4688 | int r; |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4689 | |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 4690 | if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4691 | return; |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4692 | |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 4693 | cpumask_set_cpu(cpu, cpus_hardware_enabled); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4694 | |
Radim Krčmář | 13a34e0 | 2014-08-28 15:13:03 +0200 | [diff] [blame] | 4695 | r = kvm_arch_hardware_enable(); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4696 | |
| 4697 | if (r) { |
| 4698 | cpumask_clear_cpu(cpu, cpus_hardware_enabled); |
| 4699 | atomic_inc(&hardware_enable_failed); |
Xiubo Li | 1170adc | 2015-02-26 14:58:26 +0800 | [diff] [blame] | 4700 | pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4701 | } |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4702 | } |
| 4703 | |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 4704 | static int kvm_starting_cpu(unsigned int cpu) |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4705 | { |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4706 | raw_spin_lock(&kvm_count_lock); |
Paolo Bonzini | 4fa92fb | 2013-09-10 12:57:17 +0200 | [diff] [blame] | 4707 | if (kvm_usage_count) |
| 4708 | hardware_enable_nolock(NULL); |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4709 | raw_spin_unlock(&kvm_count_lock); |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 4710 | return 0; |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4711 | } |
| 4712 | |
| 4713 | static void hardware_disable_nolock(void *junk) |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4714 | { |
| 4715 | int cpu = raw_smp_processor_id(); |
| 4716 | |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 4717 | if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4718 | return; |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 4719 | cpumask_clear_cpu(cpu, cpus_hardware_enabled); |
Radim Krčmář | 13a34e0 | 2014-08-28 15:13:03 +0200 | [diff] [blame] | 4720 | kvm_arch_hardware_disable(); |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4721 | } |
| 4722 | |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 4723 | static int kvm_dying_cpu(unsigned int cpu) |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4724 | { |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4725 | raw_spin_lock(&kvm_count_lock); |
Paolo Bonzini | 4fa92fb | 2013-09-10 12:57:17 +0200 | [diff] [blame] | 4726 | if (kvm_usage_count) |
| 4727 | hardware_disable_nolock(NULL); |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4728 | raw_spin_unlock(&kvm_count_lock); |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 4729 | return 0; |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4730 | } |
| 4731 | |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4732 | static void hardware_disable_all_nolock(void) |
| 4733 | { |
| 4734 | BUG_ON(!kvm_usage_count); |
| 4735 | |
| 4736 | kvm_usage_count--; |
| 4737 | if (!kvm_usage_count) |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4738 | on_each_cpu(hardware_disable_nolock, NULL, 1); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4739 | } |
| 4740 | |
| 4741 | static void hardware_disable_all(void) |
| 4742 | { |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4743 | raw_spin_lock(&kvm_count_lock); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4744 | hardware_disable_all_nolock(); |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4745 | raw_spin_unlock(&kvm_count_lock); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4746 | } |
| 4747 | |
| 4748 | static int hardware_enable_all(void) |
| 4749 | { |
| 4750 | int r = 0; |
| 4751 | |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4752 | raw_spin_lock(&kvm_count_lock); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4753 | |
| 4754 | kvm_usage_count++; |
| 4755 | if (kvm_usage_count == 1) { |
| 4756 | atomic_set(&hardware_enable_failed, 0); |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4757 | on_each_cpu(hardware_enable_nolock, NULL, 1); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4758 | |
| 4759 | if (atomic_read(&hardware_enable_failed)) { |
| 4760 | hardware_disable_all_nolock(); |
| 4761 | r = -EBUSY; |
| 4762 | } |
| 4763 | } |
| 4764 | |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4765 | raw_spin_unlock(&kvm_count_lock); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4766 | |
| 4767 | return r; |
| 4768 | } |
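/*
 * The two helpers above form a simple usage count: the first user enables
 * virtualization on every online CPU, and the last one disables it again.
 * The intended pairing, as used by the VM creation/destruction paths
 * elsewhere in this file, is roughly:
 *
 *	r = hardware_enable_all();	// on VM creation
 *	if (r)
 *		return r;
 *	...
 *	hardware_disable_all();		// on VM destruction
 */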
| 4769 | |
Rusty Russell | 9a2b85c | 2007-07-17 23:17:55 +1000 | [diff] [blame] | 4770 | static int kvm_reboot(struct notifier_block *notifier, unsigned long val, |
Mike Day | d77c26f | 2007-10-08 09:02:08 -0400 | [diff] [blame] | 4771 | void *v) |
Rusty Russell | 9a2b85c | 2007-07-17 23:17:55 +1000 | [diff] [blame] | 4772 | { |
Sheng Yang | 8e1c181 | 2009-04-29 11:09:04 +0800 | [diff] [blame] | 4773 | /* |
| 4774 | * Some (well, at least mine) BIOSes hang on reboot if the CPU is
| 4775 | * left in VMX root mode.
| 4776 | *
| 4777 | * Intel TXT also requires VMX to be off on all CPUs when the system shuts down.
| 4778 | */ |
Xiubo Li | 1170adc | 2015-02-26 14:58:26 +0800 | [diff] [blame] | 4779 | pr_info("kvm: exiting hardware virtualization\n"); |
Sheng Yang | 8e1c181 | 2009-04-29 11:09:04 +0800 | [diff] [blame] | 4780 | kvm_rebooting = true; |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4781 | on_each_cpu(hardware_disable_nolock, NULL, 1); |
Rusty Russell | 9a2b85c | 2007-07-17 23:17:55 +1000 | [diff] [blame] | 4782 | return NOTIFY_OK; |
| 4783 | } |
| 4784 | |
| 4785 | static struct notifier_block kvm_reboot_notifier = { |
| 4786 | .notifier_call = kvm_reboot, |
| 4787 | .priority = 0, |
| 4788 | }; |
| 4789 | |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4790 | static void kvm_io_bus_destroy(struct kvm_io_bus *bus) |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 4791 | { |
| 4792 | int i; |
| 4793 | |
| 4794 | for (i = 0; i < bus->dev_count; i++) { |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4795 | struct kvm_io_device *pos = bus->range[i].dev; |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 4796 | |
| 4797 | kvm_iodevice_destructor(pos); |
| 4798 | } |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4799 | kfree(bus); |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 4800 | } |
| 4801 | |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 4802 | static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, |
Xiubo Li | 20e87b7 | 2015-02-26 14:58:25 +0800 | [diff] [blame] | 4803 | const struct kvm_io_range *r2) |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4804 | { |
Jason Wang | 8f4216c7 | 2015-09-15 14:41:57 +0800 | [diff] [blame] | 4805 | gpa_t addr1 = r1->addr; |
| 4806 | gpa_t addr2 = r2->addr; |
| 4807 | |
| 4808 | if (addr1 < addr2) |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4809 | return -1; |
Jason Wang | 8f4216c7 | 2015-09-15 14:41:57 +0800 | [diff] [blame] | 4810 | |
| 4811 | /* If r2->len == 0, match the exact address. If r2->len != 0, |
| 4812 | * accept any overlapping write. Any order is acceptable for |
| 4813 | * overlapping ranges, because kvm_io_bus_get_first_dev ensures |
| 4814 | * we process all of them. |
| 4815 | */ |
| 4816 | if (r2->len) { |
| 4817 | addr1 += r1->len; |
| 4818 | addr2 += r2->len; |
| 4819 | } |
| 4820 | |
| 4821 | if (addr1 > addr2) |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4822 | return 1; |
Jason Wang | 8f4216c7 | 2015-09-15 14:41:57 +0800 | [diff] [blame] | 4823 | |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4824 | return 0; |
| 4825 | } |
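/*
 * Worked example of the comparison above: for a device registered at
 * addr 0x100 with len 8 (r2 covering [0x100, 0x108)), an access r1 with
 * addr 0x104 and len 4 compares equal (0x104 >= 0x100 and 0x104 + 4 <=
 * 0x100 + 8) and is therefore routed to that device, while an access at
 * addr 0xff sorts before it and yields -1. A registration with len == 0
 * matches only accesses whose start address is exactly equal.
 */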
| 4826 | |
Paolo Bonzini | a343c9b | 2013-07-16 13:03:29 +0200 | [diff] [blame] | 4827 | static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) |
| 4828 | { |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 4829 | return kvm_io_bus_cmp(p1, p2); |
Paolo Bonzini | a343c9b | 2013-07-16 13:03:29 +0200 | [diff] [blame] | 4830 | } |
| 4831 | |
Geoff Levand | 39369f7 | 2013-04-05 19:20:30 +0000 | [diff] [blame] | 4832 | static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4833 | gpa_t addr, int len) |
| 4834 | { |
| 4835 | struct kvm_io_range *range, key; |
| 4836 | int off; |
| 4837 | |
| 4838 | key = (struct kvm_io_range) { |
| 4839 | .addr = addr, |
| 4840 | .len = len, |
| 4841 | }; |
| 4842 | |
| 4843 | range = bsearch(&key, bus->range, bus->dev_count, |
| 4844 | sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); |
| 4845 | if (range == NULL) |
| 4846 | return -ENOENT; |
| 4847 | |
| 4848 | off = range - bus->range; |
| 4849 | |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 4850 | while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4851 | off--; |
| 4852 | |
| 4853 | return off; |
| 4854 | } |
| 4855 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4856 | static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4857 | struct kvm_io_range *range, const void *val) |
| 4858 | { |
| 4859 | int idx; |
| 4860 | |
| 4861 | idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); |
| 4862 | if (idx < 0) |
| 4863 | return -EOPNOTSUPP; |
| 4864 | |
| 4865 | while (idx < bus->dev_count && |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 4866 | kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4867 | if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4868 | range->len, val)) |
| 4869 | return idx; |
| 4870 | idx++; |
| 4871 | } |
| 4872 | |
| 4873 | return -EOPNOTSUPP; |
| 4874 | } |
| 4875 | |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 4876 | /* kvm_io_bus_write - called under kvm->slots_lock */ |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4877 | int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 4878 | int len, const void *val) |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 4879 | { |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4880 | struct kvm_io_bus *bus; |
| 4881 | struct kvm_io_range range; |
| 4882 | int r; |
| 4883 | |
| 4884 | range = (struct kvm_io_range) { |
| 4885 | .addr = addr, |
| 4886 | .len = len, |
| 4887 | }; |
| 4888 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4889 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4890 | if (!bus) |
| 4891 | return -ENOMEM; |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4892 | r = __kvm_io_bus_write(vcpu, bus, &range, val); |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4893 | return r < 0 ? r : 0; |
| 4894 | } |
Leo Yan | a242010 | 2019-02-22 16:10:09 +0800 | [diff] [blame] | 4895 | EXPORT_SYMBOL_GPL(kvm_io_bus_write); |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4896 | |
| 4897 | /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4898 | int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, |
| 4899 | gpa_t addr, int len, const void *val, long cookie) |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4900 | { |
Lai Jiangshan | 90d83dc | 2010-04-19 17:41:23 +0800 | [diff] [blame] | 4901 | struct kvm_io_bus *bus; |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4902 | struct kvm_io_range range; |
| 4903 | |
| 4904 | range = (struct kvm_io_range) { |
| 4905 | .addr = addr, |
| 4906 | .len = len, |
| 4907 | }; |
Lai Jiangshan | 90d83dc | 2010-04-19 17:41:23 +0800 | [diff] [blame] | 4908 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4909 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4910 | if (!bus) |
| 4911 | return -ENOMEM; |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4912 | |
| 4913 | /* First try the device referenced by cookie. */ |
| 4914 | if ((cookie >= 0) && (cookie < bus->dev_count) && |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 4915 | (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4916 | if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4917 | val)) |
| 4918 | return cookie; |
| 4919 | |
| 4920 | /* |
| 4921 | * cookie contained garbage; fall back to search and return the |
| 4922 | * correct cookie value. |
| 4923 | */ |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4924 | return __kvm_io_bus_write(vcpu, bus, &range, val); |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4925 | } |
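/*
 * Hypothetical caller pattern for the cookie fast path above: cache the
 * return value and feed it back in on the next write to the same address,
 * skipping the bsearch whenever the device has not moved on the bus:
 *
 *	long cookie = -1;	// forces the slow path on first use
 *	...
 *	cookie = kvm_io_bus_write_cookie(vcpu, KVM_MMIO_BUS, addr, len,
 *					 val, cookie);
 */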
| 4926 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4927 | static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, |
| 4928 | struct kvm_io_range *range, void *val) |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4929 | { |
| 4930 | int idx; |
| 4931 | |
| 4932 | idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4933 | if (idx < 0) |
| 4934 | return -EOPNOTSUPP; |
| 4935 | |
| 4936 | while (idx < bus->dev_count && |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 4937 | kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4938 | if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4939 | range->len, val)) |
| 4940 | return idx; |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4941 | idx++; |
| 4942 | } |
| 4943 | |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 4944 | return -EOPNOTSUPP; |
| 4945 | } |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 4946 | |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 4947 | /* kvm_io_bus_read - called under kvm->slots_lock */ |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4948 | int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4949 | int len, void *val) |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 4950 | { |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4951 | struct kvm_io_bus *bus; |
| 4952 | struct kvm_io_range range; |
| 4953 | int r; |
| 4954 | |
| 4955 | range = (struct kvm_io_range) { |
| 4956 | .addr = addr, |
| 4957 | .len = len, |
| 4958 | }; |
| 4959 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4960 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4961 | if (!bus) |
| 4962 | return -ENOMEM; |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 4963 | r = __kvm_io_bus_read(vcpu, bus, &range, val); |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 4964 | return r < 0 ? r : 0; |
| 4965 | } |
| 4966 | |
Marcelo Tosatti | 79fac95 | 2009-12-23 14:35:26 -0200 | [diff] [blame] | 4967 | /* Caller must hold slots_lock. */ |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4968 | int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, |
| 4969 | int len, struct kvm_io_device *dev) |
Michael S. Tsirkin | 6c47469 | 2009-06-29 22:24:26 +0300 | [diff] [blame] | 4970 | { |
Gal Hammer | d4c67a7 | 2018-01-16 15:34:41 +0200 | [diff] [blame] | 4971 | int i; |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4972 | struct kvm_io_bus *new_bus, *bus; |
Gal Hammer | d4c67a7 | 2018-01-16 15:34:41 +0200 | [diff] [blame] | 4973 | struct kvm_io_range range; |
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 4974 | |
Christian Borntraeger | 4a12f95 | 2017-07-07 10:51:38 +0200 | [diff] [blame] | 4975 | bus = kvm_get_bus(kvm, bus_idx); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 4976 | if (!bus) |
| 4977 | return -ENOMEM; |
| 4978 | |
Amos Kong | 6ea34c9 | 2013-05-25 06:44:15 +0800 | [diff] [blame] | 4979 | /* exclude ioeventfds, which are already bounded by the maximum fd count */
| 4980 | if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) |
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 4981 | return -ENOSPC; |
| 4982 | |
Gustavo A. R. Silva | 90952cd | 2019-01-30 17:07:47 +0100 | [diff] [blame] | 4983 | new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1), |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 4984 | GFP_KERNEL_ACCOUNT); |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4985 | if (!new_bus) |
| 4986 | return -ENOMEM; |
Gal Hammer | d4c67a7 | 2018-01-16 15:34:41 +0200 | [diff] [blame] | 4987 | |
| 4988 | range = (struct kvm_io_range) { |
| 4989 | .addr = addr, |
| 4990 | .len = len, |
| 4991 | .dev = dev, |
| 4992 | }; |
| 4993 | |
| 4994 | for (i = 0; i < bus->dev_count; i++) |
| 4995 | if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) |
| 4996 | break; |
| 4997 | |
| 4998 | memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); |
| 4999 | new_bus->dev_count++; |
| 5000 | new_bus->range[i] = range; |
| 5001 | memcpy(new_bus->range + i + 1, bus->range + i, |
| 5002 | (bus->dev_count - i) * sizeof(struct kvm_io_range)); |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 5003 | rcu_assign_pointer(kvm->buses[bus_idx], new_bus); |
| 5004 | synchronize_srcu_expedited(&kvm->srcu); |
| 5005 | kfree(bus); |
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 5006 | |
| 5007 | return 0; |
| 5008 | } |
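/*
 * Note the RCU-style update above: readers walk kvm->buses under SRCU, so
 * the bus is never resized in place. A copy is built with the new range
 * spliced in at its sorted position, published with rcu_assign_pointer(),
 * and the old array is freed only after synchronize_srcu_expedited()
 * guarantees that no reader can still be using it.
 */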
| 5009 | |
Sean Christopherson | 5d3c4c793 | 2021-04-12 15:20:49 -0700 | [diff] [blame] | 5010 | int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
| 5011 | struct kvm_io_device *dev) |
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 5012 | { |
Rustam Kovhaev | f658866 | 2020-09-07 11:55:35 -0700 | [diff] [blame] | 5013 | int i, j; |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 5014 | struct kvm_io_bus *new_bus, *bus; |
Michael S. Tsirkin | 6c47469 | 2009-06-29 22:24:26 +0300 | [diff] [blame] | 5015 | |
Sean Christopherson | 7c896d3 | 2021-04-12 15:20:50 -0700 | [diff] [blame] | 5016 | lockdep_assert_held(&kvm->slots_lock); |
| 5017 | |
Christian Borntraeger | 4a12f95 | 2017-07-07 10:51:38 +0200 | [diff] [blame] | 5018 | bus = kvm_get_bus(kvm, bus_idx); |
Peter Xu | df630b8 | 2017-03-15 16:01:17 +0800 | [diff] [blame] | 5019 | if (!bus) |
Sean Christopherson | 5d3c4c793 | 2021-04-12 15:20:49 -0700 | [diff] [blame] | 5020 | return 0; |
Peter Xu | df630b8 | 2017-03-15 16:01:17 +0800 | [diff] [blame] | 5021 | |
Sean Christopherson | 7c896d3 | 2021-04-12 15:20:50 -0700 | [diff] [blame] | 5022 | for (i = 0; i < bus->dev_count; i++) { |
Amos Kong | a1300716 | 2012-03-09 12:17:32 +0800 | [diff] [blame] | 5023 | if (bus->range[i].dev == dev) { |
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 5024 | break; |
| 5025 | } |
Sean Christopherson | 7c896d3 | 2021-04-12 15:20:50 -0700 | [diff] [blame] | 5026 | } |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 5027 | |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 5028 | if (i == bus->dev_count) |
Sean Christopherson | 5d3c4c793 | 2021-04-12 15:20:49 -0700 | [diff] [blame] | 5029 | return 0; |
Amos Kong | a1300716 | 2012-03-09 12:17:32 +0800 | [diff] [blame] | 5030 | |
Gustavo A. R. Silva | 90952cd | 2019-01-30 17:07:47 +0100 | [diff] [blame] | 5031 | new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 5032 | GFP_KERNEL_ACCOUNT); |
Rustam Kovhaev | f658866 | 2020-09-07 11:55:35 -0700 | [diff] [blame] | 5033 | if (new_bus) { |
Rustam Kovhaev | 871c433 | 2020-09-18 05:05:00 -0700 | [diff] [blame] | 5034 | memcpy(new_bus, bus, struct_size(bus, range, i)); |
Rustam Kovhaev | f658866 | 2020-09-07 11:55:35 -0700 | [diff] [blame] | 5035 | new_bus->dev_count--; |
| 5036 | memcpy(new_bus->range + i, bus->range + i + 1, |
Rustam Kovhaev | 871c433 | 2020-09-18 05:05:00 -0700 | [diff] [blame] | 5037 | flex_array_size(new_bus, range, new_bus->dev_count - i)); |
Sean Christopherson | 2ee3757 | 2021-04-12 15:20:48 -0700 | [diff] [blame] | 5038 | } |
| 5039 | |
| 5040 | rcu_assign_pointer(kvm->buses[bus_idx], new_bus); |
| 5041 | synchronize_srcu_expedited(&kvm->srcu); |
| 5042 | |
| 5043 | /* Destroy the old bus _after_ installing the (null) bus. */ |
| 5044 | if (!new_bus) { |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 5045 | pr_err("kvm: failed to shrink bus, removing it completely\n"); |
Rustam Kovhaev | f658866 | 2020-09-07 11:55:35 -0700 | [diff] [blame] | 5046 | for (j = 0; j < bus->dev_count; j++) { |
| 5047 | if (j == i) |
| 5048 | continue; |
| 5049 | kvm_iodevice_destructor(bus->range[j].dev); |
| 5050 | } |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 5051 | } |
Amos Kong | a1300716 | 2012-03-09 12:17:32 +0800 | [diff] [blame] | 5052 | |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 5053 | kfree(bus); |
Sean Christopherson | 5d3c4c793 | 2021-04-12 15:20:49 -0700 | [diff] [blame] | 5054 | return new_bus ? 0 : -ENOMEM; |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 5055 | } |
| 5056 | |
Andre Przywara | 8a39d00 | 2016-07-15 12:43:26 +0100 | [diff] [blame] | 5057 | struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
| 5058 | gpa_t addr) |
| 5059 | { |
| 5060 | struct kvm_io_bus *bus; |
| 5061 | int dev_idx, srcu_idx; |
| 5062 | struct kvm_io_device *iodev = NULL; |
| 5063 | |
| 5064 | srcu_idx = srcu_read_lock(&kvm->srcu); |
| 5065 | |
| 5066 | bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 5067 | if (!bus) |
| 5068 | goto out_unlock; |
Andre Przywara | 8a39d00 | 2016-07-15 12:43:26 +0100 | [diff] [blame] | 5069 | |
| 5070 | dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); |
| 5071 | if (dev_idx < 0) |
| 5072 | goto out_unlock; |
| 5073 | |
| 5074 | iodev = bus->range[dev_idx].dev; |
| 5075 | |
| 5076 | out_unlock: |
| 5077 | srcu_read_unlock(&kvm->srcu, srcu_idx); |
| 5078 | |
| 5079 | return iodev; |
| 5080 | } |
| 5081 | EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); |
| 5082 | |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5083 | static int kvm_debugfs_open(struct inode *inode, struct file *file, |
| 5084 | int (*get)(void *, u64 *), int (*set)(void *, u64), |
| 5085 | const char *fmt) |
| 5086 | { |
| 5087 | struct kvm_stat_data *stat_data = (struct kvm_stat_data *) |
| 5088 | inode->i_private; |
| 5089 | |
Peter Xu | 605c713 | 2021-06-25 11:32:07 -0400 | [diff] [blame] | 5090 | /* |
| 5091 | * The debugfs files hold a reference to the kvm struct, which
| 5092 | * is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe()
| 5093 | * avoids the race between open and the removal of the debugfs directory.
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5094 | */ |
Peter Xu | 605c713 | 2021-06-25 11:32:07 -0400 | [diff] [blame] | 5095 | if (!kvm_get_kvm_safe(stat_data->kvm)) |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5096 | return -ENOENT; |
| 5097 | |
Paolo Bonzini | 833b45d | 2019-09-30 18:48:44 +0200 | [diff] [blame] | 5098 | if (simple_attr_open(inode, file, get, |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5099 | kvm_stats_debugfs_mode(stat_data->desc) & 0222 |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5100 | ? set : NULL, |
| 5101 | fmt)) { |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5102 | kvm_put_kvm(stat_data->kvm); |
| 5103 | return -ENOMEM; |
| 5104 | } |
| 5105 | |
| 5106 | return 0; |
| 5107 | } |
| 5108 | |
| 5109 | static int kvm_debugfs_release(struct inode *inode, struct file *file) |
| 5110 | { |
| 5111 | struct kvm_stat_data *stat_data = (struct kvm_stat_data *) |
| 5112 | inode->i_private; |
| 5113 | |
| 5114 | simple_attr_release(inode, file); |
| 5115 | kvm_put_kvm(stat_data->kvm); |
| 5116 | |
| 5117 | return 0; |
| 5118 | } |
| 5119 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5120 | static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5121 | { |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5122 | *val = *(u64 *)((void *)(&kvm->stat) + offset); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5123 | |
| 5124 | return 0; |
| 5125 | } |
| 5126 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5127 | static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5128 | { |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5129 | *(u64 *)((void *)(&kvm->stat) + offset) = 0; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5130 | |
| 5131 | return 0; |
| 5132 | } |
| 5133 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5134 | static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5135 | { |
| 5136 | int i; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5137 | struct kvm_vcpu *vcpu; |
| 5138 | |
| 5139 | *val = 0; |
| 5140 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5141 | kvm_for_each_vcpu(i, vcpu, kvm) |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5142 | *val += *(u64 *)((void *)(&vcpu->stat) + offset); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5143 | |
| 5144 | return 0; |
| 5145 | } |
| 5146 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5147 | static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5148 | { |
| 5149 | int i; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5150 | struct kvm_vcpu *vcpu; |
| 5151 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5152 | kvm_for_each_vcpu(i, vcpu, kvm) |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5153 | *(u64 *)((void *)(&vcpu->stat) + offset) = 0; |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5154 | |
| 5155 | return 0; |
| 5156 | } |
| 5157 | |
| 5158 | static int kvm_stat_data_get(void *data, u64 *val) |
| 5159 | { |
| 5160 | int r = -EFAULT; |
| 5161 | struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; |
| 5162 | |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5163 | switch (stat_data->kind) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5164 | case KVM_STAT_VM: |
| 5165 | r = kvm_get_stat_per_vm(stat_data->kvm, |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5166 | stat_data->desc->desc.offset, val); |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5167 | break; |
| 5168 | case KVM_STAT_VCPU: |
| 5169 | r = kvm_get_stat_per_vcpu(stat_data->kvm, |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5170 | stat_data->desc->desc.offset, val); |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5171 | break; |
| 5172 | } |
| 5173 | |
| 5174 | return r; |
| 5175 | } |
| 5176 | |
| 5177 | static int kvm_stat_data_clear(void *data, u64 val) |
| 5178 | { |
| 5179 | int r = -EFAULT; |
| 5180 | struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; |
| 5181 | |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5182 | if (val) |
| 5183 | return -EINVAL; |
| 5184 | |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5185 | switch (stat_data->kind) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5186 | case KVM_STAT_VM: |
| 5187 | r = kvm_clear_stat_per_vm(stat_data->kvm, |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5188 | stat_data->desc->desc.offset); |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5189 | break; |
| 5190 | case KVM_STAT_VCPU: |
| 5191 | r = kvm_clear_stat_per_vcpu(stat_data->kvm, |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5192 | stat_data->desc->desc.offset); |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5193 | break; |
| 5194 | } |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5195 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5196 | return r; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5197 | } |
| 5198 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5199 | static int kvm_stat_data_open(struct inode *inode, struct file *file) |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5200 | { |
| 5201 | __simple_attr_check_format("%llu\n", 0ull); |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5202 | return kvm_debugfs_open(inode, file, kvm_stat_data_get, |
| 5203 | kvm_stat_data_clear, "%llu\n"); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5204 | } |
| 5205 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5206 | static const struct file_operations stat_fops_per_vm = { |
| 5207 | .owner = THIS_MODULE, |
| 5208 | .open = kvm_stat_data_open, |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5209 | .release = kvm_debugfs_release, |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5210 | .read = simple_attr_read, |
| 5211 | .write = simple_attr_write, |
| 5212 | .llseek = no_llseek, |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5213 | }; |
| 5214 | |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 5215 | static int vm_stat_get(void *_offset, u64 *val) |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 5216 | { |
| 5217 | unsigned offset = (long)_offset; |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 5218 | struct kvm *kvm; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5219 | u64 tmp_val; |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 5220 | |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 5221 | *val = 0; |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5222 | mutex_lock(&kvm_lock); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5223 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5224 | kvm_get_stat_per_vm(kvm, offset, &tmp_val); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5225 | *val += tmp_val; |
| 5226 | } |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5227 | mutex_unlock(&kvm_lock); |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 5228 | return 0; |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 5229 | } |
| 5230 | |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5231 | static int vm_stat_clear(void *_offset, u64 val) |
| 5232 | { |
| 5233 | unsigned offset = (long)_offset; |
| 5234 | struct kvm *kvm; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5235 | |
| 5236 | if (val) |
| 5237 | return -EINVAL; |
| 5238 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5239 | mutex_lock(&kvm_lock); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5240 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5241 | kvm_clear_stat_per_vm(kvm, offset); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5242 | } |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5243 | mutex_unlock(&kvm_lock); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5244 | |
| 5245 | return 0; |
| 5246 | } |
| 5247 | |
| 5248 | DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5249 | DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n"); |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 5250 | |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 5251 | static int vcpu_stat_get(void *_offset, u64 *val) |
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 5252 | { |
| 5253 | unsigned offset = (long)_offset; |
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 5254 | struct kvm *kvm; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5255 | u64 tmp_val; |
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 5256 | |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 5257 | *val = 0; |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5258 | mutex_lock(&kvm_lock); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5259 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5260 | kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5261 | *val += tmp_val; |
| 5262 | } |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5263 | mutex_unlock(&kvm_lock); |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 5264 | return 0; |
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 5265 | } |
| 5266 | |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5267 | static int vcpu_stat_clear(void *_offset, u64 val) |
| 5268 | { |
| 5269 | unsigned offset = (long)_offset; |
| 5270 | struct kvm *kvm; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5271 | |
| 5272 | if (val) |
| 5273 | return -EINVAL; |
| 5274 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5275 | mutex_lock(&kvm_lock); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5276 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5277 | kvm_clear_stat_per_vcpu(kvm, offset); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5278 | } |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5279 | mutex_unlock(&kvm_lock); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5280 | |
| 5281 | return 0; |
| 5282 | } |
| 5283 | |
| 5284 | DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, |
| 5285 | "%llu\n"); |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5286 | DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n"); |
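/*
 * These attributes back the aggregate files under /sys/kernel/debug/kvm/.
 * Reading one sums the statistic across all VMs; writing 0 to a writable
 * one clears it everywhere (any other value yields -EINVAL). With the
 * usual debugfs mount, and using the x86 "exits" stat as an example
 * (names vary by architecture):
 *
 *	cat /sys/kernel/debug/kvm/exits
 *	echo 0 > /sys/kernel/debug/kvm/exits
 */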
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 5287 | |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5288 | static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) |
| 5289 | { |
| 5290 | struct kobj_uevent_env *env; |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5291 | unsigned long long created, active; |
| 5292 | |
| 5293 | if (!kvm_dev.this_device || !kvm) |
| 5294 | return; |
| 5295 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5296 | mutex_lock(&kvm_lock); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5297 | if (type == KVM_EVENT_CREATE_VM) { |
| 5298 | kvm_createvm_count++; |
| 5299 | kvm_active_vms++; |
| 5300 | } else if (type == KVM_EVENT_DESTROY_VM) { |
| 5301 | kvm_active_vms--; |
| 5302 | } |
| 5303 | created = kvm_createvm_count; |
| 5304 | active = kvm_active_vms; |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5305 | mutex_unlock(&kvm_lock); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5306 | |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 5307 | env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5308 | if (!env) |
| 5309 | return; |
| 5310 | |
| 5311 | add_uevent_var(env, "CREATED=%llu", created); |
| 5312 | add_uevent_var(env, "COUNT=%llu", active); |
| 5313 | |
Claudio Imbrenda | fdeaf7e | 2017-07-24 13:40:03 +0200 | [diff] [blame] | 5314 | if (type == KVM_EVENT_CREATE_VM) { |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5315 | add_uevent_var(env, "EVENT=create"); |
Claudio Imbrenda | fdeaf7e | 2017-07-24 13:40:03 +0200 | [diff] [blame] | 5316 | kvm->userspace_pid = task_pid_nr(current); |
| 5317 | } else if (type == KVM_EVENT_DESTROY_VM) { |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5318 | add_uevent_var(env, "EVENT=destroy"); |
Claudio Imbrenda | fdeaf7e | 2017-07-24 13:40:03 +0200 | [diff] [blame] | 5319 | } |
| 5320 | add_uevent_var(env, "PID=%d", kvm->userspace_pid); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5321 | |
Paolo Bonzini | 85cd39a | 2021-08-04 05:28:52 -0400 | [diff] [blame] | 5322 | if (kvm->debugfs_dentry) { |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 5323 | char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5324 | |
Claudio Imbrenda | fdeaf7e | 2017-07-24 13:40:03 +0200 | [diff] [blame] | 5325 | if (p) { |
| 5326 | tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); |
| 5327 | if (!IS_ERR(tmp)) |
| 5328 | add_uevent_var(env, "STATS_PATH=%s", tmp); |
| 5329 | kfree(p); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5330 | } |
| 5331 | } |
| 5332 | /* no need for error checks, since we add at most 5 keys */
| 5333 | env->envp[env->envp_idx++] = NULL; |
| 5334 | kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); |
| 5335 | kfree(env); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5336 | } |
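/*
 * For illustration, the environment assembled above for a
 * KVM_EVENT_CREATE_VM notification looks roughly like this (values
 * hypothetical; STATS_PATH is the VM's debugfs directory):
 *
 *	CREATED=42
 *	COUNT=3
 *	EVENT=create
 *	PID=1234
 *	STATS_PATH=/1234-11
 */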
| 5337 | |
Greg Kroah-Hartman | 929f45e | 2018-05-29 18:22:04 +0200 | [diff] [blame] | 5338 | static void kvm_init_debug(void) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5339 | { |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5340 | const struct file_operations *fops; |
| 5341 | const struct _kvm_stats_desc *pdesc; |
| 5342 | int i; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5343 | |
Hollis Blanchard | 76f7c87 | 2008-04-15 16:05:42 -0500 | [diff] [blame] | 5344 | kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); |
Hamo | 4f69b68 | 2011-12-15 14:23:16 +0800 | [diff] [blame] | 5345 | |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5346 | for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { |
| 5347 | pdesc = &kvm_vm_stats_desc[i]; |
| 5348 | if (kvm_stats_debugfs_mode(pdesc) & 0222) |
| 5349 | fops = &vm_stat_fops; |
| 5350 | else |
| 5351 | fops = &vm_stat_readonly_fops; |
| 5352 | debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), |
| 5353 | kvm_debugfs_dir, |
| 5354 | (void *)(long)pdesc->desc.offset, fops); |
| 5355 | } |
| 5356 | |
| 5357 | for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { |
| 5358 | pdesc = &kvm_vcpu_stats_desc[i]; |
| 5359 | if (kvm_stats_debugfs_mode(pdesc) & 0222) |
| 5360 | fops = &vcpu_stat_fops; |
| 5361 | else |
| 5362 | fops = &vcpu_stat_readonly_fops; |
| 5363 | debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), |
| 5364 | kvm_debugfs_dir, |
| 5365 | (void *)(long)pdesc->desc.offset, fops); |
Hamo | 4f69b68 | 2011-12-15 14:23:16 +0800 | [diff] [blame] | 5366 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5367 | } |
| 5368 | |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 5369 | static int kvm_suspend(void) |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 5370 | { |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 5371 | if (kvm_usage_count) |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 5372 | hardware_disable_nolock(NULL); |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 5373 | return 0; |
| 5374 | } |
| 5375 | |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 5376 | static void kvm_resume(void) |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 5377 | { |
Zachary Amsden | ca84d1a | 2010-08-19 22:07:28 -1000 | [diff] [blame] | 5378 | if (kvm_usage_count) { |
Wanpeng Li | 2eb06c3 | 2019-05-17 16:49:49 +0800 | [diff] [blame] | 5379 | #ifdef CONFIG_LOCKDEP |
| 5380 | WARN_ON(lockdep_is_held(&kvm_count_lock)); |
| 5381 | #endif |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 5382 | hardware_enable_nolock(NULL); |
Zachary Amsden | ca84d1a | 2010-08-19 22:07:28 -1000 | [diff] [blame] | 5383 | } |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 5384 | } |
| 5385 | |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 5386 | static struct syscore_ops kvm_syscore_ops = { |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 5387 | .suspend = kvm_suspend, |
| 5388 | .resume = kvm_resume, |
| 5389 | }; |
| 5390 | |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 5391 | static inline |
| 5392 | struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) |
| 5393 | { |
| 5394 | return container_of(pn, struct kvm_vcpu, preempt_notifier); |
| 5395 | } |
| 5396 | |
| 5397 | static void kvm_sched_in(struct preempt_notifier *pn, int cpu) |
| 5398 | { |
| 5399 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); |
Xiubo Li | f95ef0cd | 2015-02-26 14:58:23 +0800 | [diff] [blame] | 5400 | |
Wanpeng Li | 046ddee | 2019-08-01 11:30:14 +0800 | [diff] [blame] | 5401 | WRITE_ONCE(vcpu->preempted, false); |
Wanpeng Li | d73eb57 | 2019-07-18 19:39:06 +0800 | [diff] [blame] | 5402 | WRITE_ONCE(vcpu->ready, false); |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 5403 | |
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 5404 | __this_cpu_write(kvm_running_vcpu, vcpu); |
Radim Krčmář | e790d9e | 2014-08-21 18:08:05 +0200 | [diff] [blame] | 5405 | kvm_arch_sched_in(vcpu, cpu); |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 5406 | kvm_arch_vcpu_load(vcpu, cpu); |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 5407 | } |
| 5408 | |
| 5409 | static void kvm_sched_out(struct preempt_notifier *pn, |
| 5410 | struct task_struct *next) |
| 5411 | { |
| 5412 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); |
| 5413 | |
Peter Zijlstra | 3ba9f93 | 2021-06-11 10:28:13 +0200 | [diff] [blame] | 5414 | if (current->on_rq) { |
Wanpeng Li | 046ddee | 2019-08-01 11:30:14 +0800 | [diff] [blame] | 5415 | WRITE_ONCE(vcpu->preempted, true); |
Wanpeng Li | d73eb57 | 2019-07-18 19:39:06 +0800 | [diff] [blame] | 5416 | WRITE_ONCE(vcpu->ready, true); |
| 5417 | } |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 5418 | kvm_arch_vcpu_put(vcpu); |
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 5419 | __this_cpu_write(kvm_running_vcpu, NULL); |
| 5420 | } |
| 5421 | |
| 5422 | /** |
| 5423 | * kvm_get_running_vcpu - get the vcpu running on the current CPU. |
Marc Zyngier | 1f03b2b | 2020-02-07 16:34:10 +0000 | [diff] [blame] | 5424 | * |
| 5425 | * It is enough to disable preemption only while reading the per-CPU
| 5426 | * variable; the resolved vcpu pointer stays usable after preemption is
| 5427 | * re-enabled, because even if the current thread migrates to another
| 5428 | * CPU, the preempt notifier handlers keep the per-CPU variable in sync,
| 5429 | * so a later read would still return the same vcpu.
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 5430 | */ |
| 5431 | struct kvm_vcpu *kvm_get_running_vcpu(void) |
| 5432 | { |
Marc Zyngier | 1f03b2b | 2020-02-07 16:34:10 +0000 | [diff] [blame] | 5433 | struct kvm_vcpu *vcpu; |
| 5434 | |
| 5435 | preempt_disable(); |
| 5436 | vcpu = __this_cpu_read(kvm_running_vcpu); |
| 5437 | preempt_enable(); |
| 5438 | |
| 5439 | return vcpu; |
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 5440 | } |
Wanpeng Li | 379a3c8 | 2020-04-28 14:23:27 +0800 | [diff] [blame] | 5441 | EXPORT_SYMBOL_GPL(kvm_get_running_vcpu); |
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 5442 | |
| 5443 | /** |
| 5444 | * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus. |
| 5445 | */ |
| 5446 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) |
| 5447 | { |
| 5448 | return &kvm_running_vcpu; |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 5449 | } |
| 5450 | |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 5451 | struct kvm_cpu_compat_check { |
| 5452 | void *opaque; |
| 5453 | int *ret; |
| 5454 | }; |
| 5455 | |
| 5456 | static void check_processor_compat(void *data) |
Sean Christopherson | f257d6d | 2019-04-19 22:18:17 -0700 | [diff] [blame] | 5457 | { |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 5458 | struct kvm_cpu_compat_check *c = data; |
| 5459 | |
| 5460 | *c->ret = kvm_arch_check_processor_compat(c->opaque); |
Sean Christopherson | f257d6d | 2019-04-19 22:18:17 -0700 | [diff] [blame] | 5461 | } |
| 5462 | |
Avi Kivity | 0ee75be | 2010-04-28 15:39:01 +0300 | [diff] [blame] | 5463 | int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 5464 | struct module *module) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5465 | { |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 5466 | struct kvm_cpu_compat_check c; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5467 | int r; |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 5468 | int cpu; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5469 | |
Zhang Xiantao | f8c16bb | 2007-11-14 20:40:21 +0800 | [diff] [blame] | 5470 | r = kvm_arch_init(opaque); |
| 5471 | if (r) |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 5472 | goto out_fail; |
Zhang Xiantao | cb498ea | 2007-11-14 20:39:31 +0800 | [diff] [blame] | 5473 | |
Asias He | 7dac16c | 2013-05-08 10:57:29 +0800 | [diff] [blame] | 5474 | /* |
| 5475 | * kvm_arch_init() makes sure there's at most one caller
| 5476 | * on architectures that support multiple implementations,
| 5477 | * like Intel and AMD on x86.
Paolo Bonzini | 36343f6 | 2016-10-26 13:35:56 +0200 | [diff] [blame] | 5478 | * kvm_arch_init() must be called before kvm_irqfd_init() to avoid
| 5479 | * conflicts in case KVM is already set up for another implementation.
Asias He | 7dac16c | 2013-05-08 10:57:29 +0800 | [diff] [blame] | 5480 | */ |
Paolo Bonzini | 36343f6 | 2016-10-26 13:35:56 +0200 | [diff] [blame] | 5481 | r = kvm_irqfd_init(); |
| 5482 | if (r) |
| 5483 | goto out_irqfd; |
Asias He | 7dac16c | 2013-05-08 10:57:29 +0800 | [diff] [blame] | 5484 | |
Avi Kivity | 8437a617 | 2009-06-06 14:52:35 -0700 | [diff] [blame] | 5485 | if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 5486 | r = -ENOMEM; |
| 5487 | goto out_free_0; |
| 5488 | } |
| 5489 | |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 5490 | r = kvm_arch_hardware_setup(opaque); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5491 | if (r < 0) |
Miaohe Lin | faf0be2 | 2019-11-23 10:45:50 +0800 | [diff] [blame] | 5492 | goto out_free_1; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5493 | |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 5494 | c.ret = &r; |
| 5495 | c.opaque = opaque; |
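	/* The compat check must pass on every online CPU, not just this one. */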
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 5496 | for_each_online_cpu(cpu) { |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 5497 | smp_call_function_single(cpu, check_processor_compat, &c, 1); |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 5498 | if (r < 0) |
Miaohe Lin | faf0be2 | 2019-11-23 10:45:50 +0800 | [diff] [blame] | 5499 | goto out_free_2; |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 5500 | } |
| 5501 | |
Thomas Gleixner | 73c1b41 | 2016-12-21 20:19:54 +0100 | [diff] [blame] | 5502 | r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting", |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 5503 | kvm_starting_cpu, kvm_dying_cpu); |
Avi Kivity | 774c47f | 2007-02-12 00:54:47 -0800 | [diff] [blame] | 5504 | if (r) |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 5505 | goto out_free_2; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5506 | register_reboot_notifier(&kvm_reboot_notifier); |
| 5507 | |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 5508 | /* A kmem cache lets us meet the alignment requirements of fx_save. */ |
Avi Kivity | 0ee75be | 2010-04-28 15:39:01 +0300 | [diff] [blame] | 5509 | if (!vcpu_align) |
| 5510 | vcpu_align = __alignof__(struct kvm_vcpu); |
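	/*
	 * Whitelist the arch..stats_id region for hardened usercopy; only
	 * that slice of struct kvm_vcpu may be copied to/from user space.
	 */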
Paolo Bonzini | 4651573 | 2017-10-26 15:45:46 +0200 | [diff] [blame] | 5511 | kvm_vcpu_cache = |
| 5512 | kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align, |
| 5513 | SLAB_ACCOUNT, |
| 5514 | offsetof(struct kvm_vcpu, arch), |
Jing Zhang | ce55c04 | 2021-06-18 22:27:06 +0000 | [diff] [blame] | 5515 | offsetofend(struct kvm_vcpu, stats_id) |
| 5516 | - offsetof(struct kvm_vcpu, arch), |
Paolo Bonzini | 4651573 | 2017-10-26 15:45:46 +0200 | [diff] [blame] | 5517 | NULL); |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 5518 | if (!kvm_vcpu_cache) { |
| 5519 | r = -ENOMEM; |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 5520 | goto out_free_3; |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 5521 | } |
| 5522 | |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 5523 | r = kvm_async_pf_init(); |
| 5524 | if (r) |
| 5525 | goto out_free; |
| 5526 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5527 | kvm_chardev_ops.owner = module; |
Christian Borntraeger | 3d3aab1 | 2008-12-02 11:17:32 +0100 | [diff] [blame] | 5528 | kvm_vm_fops.owner = module; |
| 5529 | kvm_vcpu_fops.owner = module; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5530 | |
| 5531 | r = misc_register(&kvm_dev); |
| 5532 | if (r) { |
Xiubo Li | 1170adc | 2015-02-26 14:58:26 +0800 | [diff] [blame] | 5533 | pr_err("kvm: misc device register failed\n"); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 5534 | goto out_unreg; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5535 | } |
| 5536 | |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 5537 | register_syscore_ops(&kvm_syscore_ops); |
| 5538 | |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 5539 | kvm_preempt_ops.sched_in = kvm_sched_in; |
| 5540 | kvm_preempt_ops.sched_out = kvm_sched_out; |
| 5541 | |
Greg Kroah-Hartman | 929f45e | 2018-05-29 18:22:04 +0200 | [diff] [blame] | 5542 | kvm_init_debug(); |
Darrick J. Wong | 0ea4ed8 | 2009-10-14 16:21:00 -0700 | [diff] [blame] | 5543 | |
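	/*
	 * VFIO ops registration is best effort: a failure is reported via
	 * WARN_ON() but does not fail kvm_init().
	 */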
Paolo Bonzini | 3c3c29f | 2014-09-24 13:02:46 +0200 | [diff] [blame] | 5544 | r = kvm_vfio_ops_init(); |
| 5545 | WARN_ON(r); |
| 5546 | |
Avi Kivity | c7addb9 | 2007-09-16 18:58:32 +0200 | [diff] [blame] | 5547 | return 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5548 | |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 5549 | out_unreg: |
| 5550 | kvm_async_pf_deinit(); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5551 | out_free: |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 5552 | kmem_cache_destroy(kvm_vcpu_cache); |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 5553 | out_free_3: |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5554 | unregister_reboot_notifier(&kvm_reboot_notifier); |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 5555 | cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 5556 | out_free_2: |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 5557 | kvm_arch_hardware_unsetup(); |
Miaohe Lin | faf0be2 | 2019-11-23 10:45:50 +0800 | [diff] [blame] | 5558 | out_free_1: |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 5559 | free_cpumask_var(cpus_hardware_enabled); |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 5560 | out_free_0: |
Cornelia Huck | a0f155e | 2013-02-28 12:33:18 +0100 | [diff] [blame] | 5561 | kvm_irqfd_exit(); |
Paolo Bonzini | 36343f6 | 2016-10-26 13:35:56 +0200 | [diff] [blame] | 5562 | out_irqfd: |
Asias He | 7dac16c | 2013-05-08 10:57:29 +0800 | [diff] [blame] | 5563 | kvm_arch_exit(); |
| 5564 | out_fail: |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5565 | return r; |
| 5566 | } |
Zhang Xiantao | cb498ea | 2007-11-14 20:39:31 +0800 | [diff] [blame] | 5567 | EXPORT_SYMBOL_GPL(kvm_init); |
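
/*
 * Hedged usage sketch: an arch module's init routine wraps kvm_init(),
 * passing its vCPU container type so kvm_vcpu_cache is sized and aligned
 * for it. The names mirror x86's VMX module and are shown for illustration
 * only; see the arch code for the real call site.
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
 *				__alignof__(struct vcpu_vmx), THIS_MODULE);
 *	}
 *	module_init(vmx_init);
 */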
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5568 | |
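/* Tear down in (roughly) the reverse order of kvm_init(). */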
Zhang Xiantao | cb498ea | 2007-11-14 20:39:31 +0800 | [diff] [blame] | 5569 | void kvm_exit(void) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5570 | { |
Janosch Frank | 4bd33b5 | 2015-10-14 12:37:35 +0200 | [diff] [blame] | 5571 | debugfs_remove_recursive(kvm_debugfs_dir); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5572 | misc_deregister(&kvm_dev); |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 5573 | kmem_cache_destroy(kvm_vcpu_cache); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 5574 | kvm_async_pf_deinit(); |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 5575 | unregister_syscore_ops(&kvm_syscore_ops); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5576 | unregister_reboot_notifier(&kvm_reboot_notifier); |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 5577 | cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 5578 | on_each_cpu(hardware_disable_nolock, NULL, 1); |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 5579 | kvm_arch_hardware_unsetup(); |
Zhang Xiantao | f8c16bb | 2007-11-14 20:40:21 +0800 | [diff] [blame] | 5580 | kvm_arch_exit(); |
Cornelia Huck | a0f155e | 2013-02-28 12:33:18 +0100 | [diff] [blame] | 5581 | kvm_irqfd_exit(); |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 5582 | free_cpumask_var(cpus_hardware_enabled); |
Wanpeng Li | 571ee1b | 2014-10-09 18:30:08 +0800 | [diff] [blame] | 5583 | kvm_vfio_ops_exit(); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5584 | } |
Zhang Xiantao | cb498ea | 2007-11-14 20:39:31 +0800 | [diff] [blame] | 5585 | EXPORT_SYMBOL_GPL(kvm_exit); |
Junaid Shahid | c57c804 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 5586 | |
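/*
 * Handshake data passed from kvm_vm_create_worker_thread() to the worker.
 * It lives on the creator's stack, so the worker copies out what it needs
 * and then signals init_done to release it.
 */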
| 5587 | struct kvm_vm_worker_thread_context { |
| 5588 | struct kvm *kvm; |
| 5589 | struct task_struct *parent; |
| 5590 | struct completion init_done; |
| 5591 | kvm_vm_thread_fn_t thread_fn; |
| 5592 | uintptr_t data; |
| 5593 | int err; |
| 5594 | }; |
| 5595 | |
| 5596 | static int kvm_vm_worker_thread(void *context) |
| 5597 | { |
| 5598 | /* |
| 5599 | * The init_context is allocated on the stack of the parent thread, so |
| 5600 | * we have to make local copies of anything needed beyond initialization. |
| 5601 | */ |
| 5602 | struct kvm_vm_worker_thread_context *init_context = context; |
| 5603 | struct kvm *kvm = init_context->kvm; |
| 5604 | kvm_vm_thread_fn_t thread_fn = init_context->thread_fn; |
| 5605 | uintptr_t data = init_context->data; |
| 5606 | int err; |
| 5607 | |
| 5608 | err = kthread_park(current); |
| 5609 | /* kthread_park(current) is never supposed to return an error */ |
| 5610 | WARN_ON(err != 0); |
| 5611 | if (err) |
| 5612 | goto init_complete; |
| 5613 | |
| 5614 | err = cgroup_attach_task_all(init_context->parent, current); |
| 5615 | if (err) { |
| 5616 | kvm_err("%s: cgroup_attach_task_all failed with err %d\n", |
| 5617 | __func__, err); |
| 5618 | goto init_complete; |
| 5619 | } |
| 5620 | |
| 5621 | set_user_nice(current, task_nice(init_context->parent)); |
| 5622 | |
| 5623 | init_complete: |
| 5624 | init_context->err = err; |
| 5625 | complete(&init_context->init_done); |
| 5626 | init_context = NULL; |
| 5627 | |
| 5628 | if (err) |
| 5629 | return err; |
| 5630 | |
| 5631 | /* Wait to be woken up by the spawner before proceeding. */ |
| 5632 | kthread_parkme(); |
| 5633 | |
| 5634 | if (!kthread_should_stop()) |
| 5635 | err = thread_fn(kvm, data); |
| 5636 | |
| 5637 | return err; |
| 5638 | } |
| 5639 | |
| 5640 | int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, |
| 5641 | uintptr_t data, const char *name, |
| 5642 | struct task_struct **thread_ptr) |
| 5643 | { |
| 5644 | struct kvm_vm_worker_thread_context init_context = {}; |
| 5645 | struct task_struct *thread; |
| 5646 | |
| 5647 | *thread_ptr = NULL; |
| 5648 | init_context.kvm = kvm; |
| 5649 | init_context.parent = current; |
| 5650 | init_context.thread_fn = thread_fn; |
| 5651 | init_context.data = data; |
| 5652 | init_completion(&init_context.init_done); |
| 5653 | |
| 5654 | thread = kthread_run(kvm_vm_worker_thread, &init_context, |
| 5655 | "%s-%d", name, task_pid_nr(current)); |
| 5656 | if (IS_ERR(thread)) |
| 5657 | return PTR_ERR(thread); |
| 5658 | |
| 5659 | /* kthread_run is never supposed to return NULL */ |
| 5660 | WARN_ON(thread == NULL); |
| 5661 | |
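	/* Wait for the worker to park (or fail) and publish init_context.err. */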
| 5662 | wait_for_completion(&init_context.init_done); |
| 5663 | |
| 5664 | if (!init_context.err) |
| 5665 | *thread_ptr = thread; |
| 5666 | |
| 5667 | return init_context.err; |
| 5668 | } |
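
/*
 * Hedged usage sketch: x86's NX hugepage recovery thread is created this
 * way (names shown for illustration; see the arch code for the real call
 * site). Note the worker parks itself, so the caller must kthread_unpark()
 * it before thread_fn starts running.
 *
 *	err = kvm_vm_create_worker_thread(kvm, recovery_fn, 0,
 *					  "kvm-nx-lpage-recovery",
 *					  &kvm->arch.nx_lpage_recovery_thread);
 *	if (!err)
 *		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
 */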