// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "kvm_mm.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>

/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);

/* Default resets per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
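
/*
 * The four knobs above are ordinary module parameters, so they can be
 * tuned at runtime; a minimal example (illustrative, not part of this
 * file) on a running host:
 *
 *	echo 200000 > /sys/module/kvm/parameters/halt_poll_ns
 *
 * Growth starts from halt_poll_ns_grow_start and multiplies by
 * halt_poll_ns_grow on each grow; halt_poll_ns_shrink == 0 means a
 * shrink resets per-vCPU polling to zero instead of dividing it.
 */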

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */
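
/*
 * Illustrative nesting that respects the ordering above (sketch only):
 *
 *	mutex_lock(&kvm->lock);
 *	mutex_lock(&kvm->slots_lock);
 *	...
 *	mutex_unlock(&kvm->slots_lock);
 *	mutex_unlock(&kvm->lock);
 */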

DEFINE_MUTEX(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static const struct file_operations stat_fops_per_vm;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the open has been done by a 64bit task, and the KVM fd
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
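
/*
 * Effect of the two defenses above, as seen from a 32-bit (compat) task
 * on such an architecture (hypothetical userspace, sketch only):
 *
 *	fd = open("/dev/kvm", O_RDWR);	// fails with ENODEV
 *	ioctl(fd64, KVM_CREATE_VM, 0);	// fd inherited from a 64-bit
 *					// task: fails with EINVAL
 */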

static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);

__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
						   unsigned long start, unsigned long end)
{
}

bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
{
	/*
	 * The metadata used by is_zone_device_page() to determine whether or
	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
	 * page_count() is zero to help detect bad usage of this helper.
	 */
	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
		return false;

	return is_zone_device_page(pfn_to_page(pfn));
}

bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
	/*
	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
	 * perspective they are "normal" pages, albeit with slightly different
	 * usage rules.
	 */
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn)) &&
		       !is_zero_pfn(pfn) &&
		       !kvm_is_zone_device_pfn(pfn);

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	__this_cpu_write(kvm_running_vcpu, vcpu);
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	__this_cpu_write(kvm_running_vcpu, NULL);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);
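
/*
 * Typical caller (sketch mirroring the KVM_RUN ioctl path): pin the
 * vCPU's arch state to this physical CPU for the duration of the call.
 *
 *	vcpu_load(vcpu);
 *	r = kvm_arch_vcpu_ioctl_run(vcpu);
 *	vcpu_put(vcpu);
 */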

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}
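
/*
 * Summary of the decision above: a request flagged KVM_REQUEST_WAIT
 * sends an IPI unless the vCPU is fully OUTSIDE_GUEST_MODE (it must
 * also chase vCPUs in READING_SHADOW_PAGE_TABLES), whereas an ordinary
 * request only IPIs a vCPU that is IN_GUEST_MODE.
 */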

static void ack_flush(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
{
	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_flush, NULL, wait);
	return true;
}

static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu,
				  unsigned int req, struct cpumask *tmp,
				  int current_cpu)
{
	int cpu;

	kvm_make_request(req, vcpu);

	if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
		return;

	/*
	 * Note, the vCPU could get migrated to a different pCPU at any point
	 * after kvm_request_needs_ipi(), which could result in sending an IPI
	 * to the previous pCPU.  But, that's OK because the purpose of the IPI
	 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
	 * satisfied if the vCPU migrates.  Entering READING_SHADOW_PAGE_TABLES
	 * after this point is also OK, as the requirement is only that KVM wait
	 * for vCPUs that were reading SPTEs _before_ any changes were
	 * finalized.  See kvm_vcpu_kick() for more details on handling requests.
	 */
	if (kvm_request_needs_ipi(vcpu, req)) {
		cpu = READ_ONCE(vcpu->cpu);
		if (cpu != -1 && cpu != current_cpu)
			__cpumask_set_cpu(cpu, tmp);
	}
}

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap)
{
	struct kvm_vcpu *vcpu;
	struct cpumask *cpus;
	int i, me;
	bool called;

	me = get_cpu();

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
		vcpu = kvm_get_vcpu(kvm, i);
		if (!vcpu)
			continue;
		kvm_make_vcpu_request(kvm, vcpu, req, cpus, me);
	}

	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except)
{
	struct kvm_vcpu *vcpu;
	struct cpumask *cpus;
	unsigned long i;
	bool called;
	int me;

	me = get_cpu();

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == except)
			continue;
		kvm_make_vcpu_request(kvm, vcpu, req, cpus, me);
	}

	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	return kvm_make_all_cpus_request_except(kvm, req, NULL);
}
EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	++kvm->stat.generic.remote_tlb_flush_requests;

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode. Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
	 * barrier here.
	 */
	if (!kvm_arch_flush_remote_tlb(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.generic.remote_tlb_flush;
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif
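
/*
 * The vCPU side consumes these requests in its run loop; a minimal
 * sketch (arch code, the flush helper's name is illustrative of x86's
 * handling):
 *
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		kvm_vcpu_flush_tlb_all(vcpu);
 */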

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{
	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
	else
		return (void *)__get_free_page(gfp_flags);
}

int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	void *obj;

	if (mc->nobjs >= min)
		return 0;
	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
		obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs;
}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs) {
		if (mc->kmem_cache)
			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
		else
			free_page((unsigned long)mc->objects[--mc->nobjs]);
	}
}

void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	if (WARN_ON(!mc->nobjs))
		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
	else
		p = mc->objects[--mc->nobjs];
	BUG_ON(!p);
	return p;
}
#endif
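
/*
 * Usage sketch for the cache above (mirrors arch MMU code; the cache
 * field name is illustrative): top up before taking mmu_lock, so the
 * later allocation cannot fail or sleep while the lock is held:
 *
 *	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, 4);
 *	if (r)
 *		return r;
 *	...			(now holding mmu_lock)
 *	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
 */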

static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
#ifndef __KVM_HAVE_ARCH_WQP
	rcuwait_init(&vcpu->wait);
#endif
	kvm_async_pf_vcpu_init(vcpu);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;
	vcpu->ready = false;
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
	vcpu->last_used_slot = NULL;
}

static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_dirty_ring_free(&vcpu->dirty_ring);
	kvm_arch_vcpu_destroy(vcpu);

	/*
	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
	 * the vcpu->pid pointer, and at destruction time all file descriptors
	 * are already gone.
	 */
	put_pid(rcu_dereference_protected(vcpu->pid, 1));

	free_page((unsigned long)vcpu->run);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_destroy_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_vcpu_destroy(vcpu);
		xa_erase(&kvm->vcpu_array, i);
	}

	atomic_set(&kvm->online_vcpus, 0);
}
EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start, unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
	srcu_read_unlock(&kvm->srcu, idx);
}

typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);

typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
			     unsigned long end);

struct kvm_hva_range {
	unsigned long start;
	unsigned long end;
	pte_t pte;
	hva_handler_t handler;
	on_lock_fn_t on_lock;
	bool flush_on_ret;
	bool may_block;
};

/*
 * Use a dedicated stub instead of NULL to indicate that there is no callback
 * function/handler.  The compiler technically can't guarantee that a real
 * function will have a non-zero address, and so it will generate code to
 * check for !NULL, whereas comparing against a stub will be elided at compile
 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
 */
static void kvm_null_fn(void)
{

}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)

/* Iterate over each memslot intersecting [start, last] (inclusive) range */
#define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	     \
	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
	     node;							     \
	     node = interval_tree_iter_next(node, start, last))

static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
						  const struct kvm_hva_range *range)
{
	bool ret = false, locked = false;
	struct kvm_gfn_range gfn_range;
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int i, idx;

	if (WARN_ON_ONCE(range->end <= range->start))
		return 0;

	/* A null handler is allowed if and only if on_lock() is provided. */
	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
			 IS_KVM_NULL_FN(range->handler)))
		return 0;

	idx = srcu_read_lock(&kvm->srcu);

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct interval_tree_node *node;

		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot_in_hva_range(node, slots,
						  range->start, range->end - 1) {
			unsigned long hva_start, hva_end;

			slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
			hva_start = max(range->start, slot->userspace_addr);
			hva_end = min(range->end, slot->userspace_addr +
						  (slot->npages << PAGE_SHIFT));

			/*
			 * To optimize for the likely case where the address
			 * range is covered by zero or one memslots, don't
			 * bother making these conditional (to avoid writes on
			 * the second or later invocation of the handler).
			 */
			gfn_range.pte = range->pte;
			gfn_range.may_block = range->may_block;

			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
			gfn_range.slot = slot;

			if (!locked) {
				locked = true;
				KVM_MMU_LOCK(kvm);
				if (!IS_KVM_NULL_FN(range->on_lock))
					range->on_lock(kvm, range->start, range->end);
				if (IS_KVM_NULL_FN(range->handler))
					break;
			}
			ret |= range->handler(kvm, &gfn_range);
		}
	}

	if (range->flush_on_ret && ret)
		kvm_flush_remote_tlbs(kvm);

	if (locked)
		KVM_MMU_UNLOCK(kvm);

	srcu_read_unlock(&kvm->srcu, idx);

	/* The notifiers are averse to booleans. :-( */
	return (int)ret;
}

static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
						unsigned long start,
						unsigned long end,
						pte_t pte,
						hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= pte,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= true,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
							 unsigned long start,
							 unsigned long end,
							 hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= __pte(0),
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= false,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	trace_kvm_set_spte_hva(address);

	/*
	 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
	 * If mmu_notifier_count is zero, then no in-progress invalidations,
	 * including this one, found a relevant memslot at start(); rechecking
	 * memslots here is unnecessary.  Note, a false positive (count elevated
	 * by a different invalidation) is sub-optimal but functionally ok.
	 */
	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
	if (!READ_ONCE(kvm->mmu_notifier_count))
		return;

	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
}

void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
			    unsigned long end)
{
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	if (likely(kvm->mmu_notifier_count == 1)) {
		kvm->mmu_notifier_range_start = start;
		kvm->mmu_notifier_range_end = end;
	} else {
		/*
		 * Fully tracking multiple concurrent ranges has diminishing
		 * returns. Keep things simple and just find the minimal range
		 * which includes the current and new ranges. As there won't be
		 * enough information to subtract a range after its invalidate
		 * completes, any ranges invalidated concurrently will
		 * accumulate and persist until all outstanding invalidates
		 * complete.
		 */
		kvm->mmu_notifier_range_start =
			min(kvm->mmu_notifier_range_start, start);
		kvm->mmu_notifier_range_end =
			max(kvm->mmu_notifier_range_end, end);
	}
}

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),
		.handler	= kvm_unmap_gfn_range,
		.on_lock	= kvm_inc_notifier_count,
		.flush_on_ret	= true,
		.may_block	= mmu_notifier_range_blockable(range),
	};

	trace_kvm_unmap_hva_range(range->start, range->end);

	/*
	 * Prevent memslot modification between range_start() and range_end()
	 * so that conditionally locking provides the same result in both
	 * functions.  Without that guarantee, the mmu_notifier_count
	 * adjustments will be imbalanced.
	 *
	 * Pairs with the decrement in range_end().
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	kvm->mn_active_invalidate_count++;
	spin_unlock(&kvm->mn_invalidate_lock);

	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
					  hva_range.may_block);

	__kvm_handle_hva_range(kvm, &hva_range);

	return 0;
}

void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
			    unsigned long end)
{
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
}
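
/*
 * Consumer-side sketch of the seq/count protocol above (mirrors arch
 * page-fault code): sample the sequence before resolving the pfn, then
 * retry under mmu_lock if an invalidation raced with the fault:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto retry;
 */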

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),
		.handler	= (void *)kvm_null_fn,
		.on_lock	= kvm_dec_notifier_count,
		.flush_on_ret	= false,
		.may_block	= mmu_notifier_range_blockable(range),
	};
	bool wake;

	__kvm_handle_hva_range(kvm, &hva_range);

	/* Pairs with the increment in range_start(). */
	spin_lock(&kvm->mn_invalidate_lock);
	wake = (--kvm->mn_active_invalidate_count == 0);
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * There can only be one waiter, since the wait happens under
	 * slots_lock.
	 */
	if (wake)
		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	trace_kvm_age_hva(start, end);

	return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	trace_kvm_age_hva(start, end);

	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead. If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence. If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	trace_kvm_test_age_hva(address);

	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
					     kvm_test_age_gfn);
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_range	= kvm_mmu_notifier_invalidate_range,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
				unsigned long state,
				void *unused)
{
	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);

	return kvm_arch_pm_notifier(kvm, state);
}

static void kvm_init_pm_notifier(struct kvm *kvm)
{
	kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
	/* Suspend KVM before we suspend ftrace, RCU, etc. */
	kvm->pm_notifier.priority = INT_MAX;
	register_pm_notifier(&kvm->pm_notifier);
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
	unregister_pm_notifier(&kvm->pm_notifier);
}
#else  /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
static void kvm_init_pm_notifier(struct kvm *kvm)
{
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
}
#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/* This does not remove the slot from struct kvm_memslots data structures */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_destroy_dirty_bitmap(slot);

	kvm_arch_free_memslot(kvm, slot);

	kfree(slot);
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct hlist_node *idnode;
	struct kvm_memory_slot *memslot;
	int bkt;

	/*
	 * The same memslot objects live in both active and inactive sets,
	 * arbitrarily free using index '1' so the second invocation of this
	 * function isn't operating over a structure with dangling pointers
	 * (even though this function isn't actually touching them).
	 */
	if (!slots->node_idx)
		return;

	hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
		kvm_free_memslot(kvm, memslot);
}

static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
{
	switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
	case KVM_STATS_TYPE_INSTANT:
		return 0444;
	case KVM_STATS_TYPE_CUMULATIVE:
	case KVM_STATS_TYPE_PEAK:
	default:
		return 0644;
	}
}
| 930 | |
| 931 | |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 932 | static void kvm_destroy_vm_debugfs(struct kvm *kvm) |
| 933 | { |
| 934 | int i; |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 935 | int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + |
| 936 | kvm_vcpu_stats_header.num_desc; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 937 | |
| 938 | if (!kvm->debugfs_dentry) |
| 939 | return; |
| 940 | |
| 941 | debugfs_remove_recursive(kvm->debugfs_dentry); |
| 942 | |
Luiz Capitulino | 9d5a1dc | 2016-09-07 14:47:21 -0400 | [diff] [blame] | 943 | if (kvm->debugfs_stat_data) { |
| 944 | for (i = 0; i < kvm_debugfs_num_entries; i++) |
| 945 | kfree(kvm->debugfs_stat_data[i]); |
| 946 | kfree(kvm->debugfs_stat_data); |
| 947 | } |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 948 | } |
| 949 | |
| 950 | static int kvm_create_vm_debugfs(struct kvm *kvm, int fd) |
| 951 | { |
Paolo Bonzini | 85cd39a | 2021-08-04 05:28:52 -0400 | [diff] [blame] | 952 | static DEFINE_MUTEX(kvm_debugfs_lock); |
| 953 | struct dentry *dent; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 954 | char dir_name[ITOA_MAX_LEN * 2]; |
| 955 | struct kvm_stat_data *stat_data; |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 956 | const struct _kvm_stats_desc *pdesc; |
Peter Xu | 3165af7 | 2021-07-30 18:04:49 -0400 | [diff] [blame] | 957 | int i, ret; |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 958 | int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + |
| 959 | kvm_vcpu_stats_header.num_desc; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 960 | |
| 961 | if (!debugfs_initialized()) |
| 962 | return 0; |
| 963 | |
| 964 | snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd); |
Paolo Bonzini | 85cd39a | 2021-08-04 05:28:52 -0400 | [diff] [blame] | 965 | mutex_lock(&kvm_debugfs_lock); |
| 966 | dent = debugfs_lookup(dir_name, kvm_debugfs_dir); |
| 967 | if (dent) { |
| 968 | pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name); |
| 969 | dput(dent); |
| 970 | mutex_unlock(&kvm_debugfs_lock); |
| 971 | return 0; |
| 972 | } |
| 973 | dent = debugfs_create_dir(dir_name, kvm_debugfs_dir); |
| 974 | mutex_unlock(&kvm_debugfs_lock); |
| 975 | if (IS_ERR(dent)) |
| 976 | return 0; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 977 | |
Paolo Bonzini | 85cd39a | 2021-08-04 05:28:52 -0400 | [diff] [blame] | 978 | kvm->debugfs_dentry = dent; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 979 | kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, |
| 980 | sizeof(*kvm->debugfs_stat_data), |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 981 | GFP_KERNEL_ACCOUNT); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 982 | if (!kvm->debugfs_stat_data) |
| 983 | return -ENOMEM; |
| 984 | |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 985 | for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { |
| 986 | pdesc = &kvm_vm_stats_desc[i]; |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 987 | stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 988 | if (!stat_data) |
| 989 | return -ENOMEM; |
| 990 | |
| 991 | stat_data->kvm = kvm; |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 992 | stat_data->desc = pdesc; |
| 993 | stat_data->kind = KVM_STAT_VM; |
| 994 | kvm->debugfs_stat_data[i] = stat_data; |
| 995 | debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), |
| 996 | kvm->debugfs_dentry, stat_data, |
| 997 | &stat_fops_per_vm); |
| 998 | } |
| 999 | |
| 1000 | for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { |
| 1001 | pdesc = &kvm_vcpu_stats_desc[i]; |
| 1002 | stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT); |
| 1003 | if (!stat_data) |
| 1004 | return -ENOMEM; |
| 1005 | |
| 1006 | stat_data->kvm = kvm; |
| 1007 | stat_data->desc = pdesc; |
| 1008 | stat_data->kind = KVM_STAT_VCPU; |
Pavel Skripkin | 004d62e | 2021-07-01 22:55:00 +0300 | [diff] [blame] | 1009 | kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data; |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 1010 | debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 1011 | kvm->debugfs_dentry, stat_data, |
| 1012 | &stat_fops_per_vm); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 1013 | } |
Peter Xu | 3165af7 | 2021-07-30 18:04:49 -0400 | [diff] [blame] | 1014 | |
| 1015 | ret = kvm_arch_create_vm_debugfs(kvm); |
| 1016 | if (ret) { |
| 1017 | kvm_destroy_vm_debugfs(kvm); |
 | 1018 | return ret; 
| 1019 | } |
| 1020 | |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 1021 | return 0; |
| 1022 | } |
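
/*
 * Informational example: with debugfs mounted at /sys/kernel/debug, the files
 * created above land in a per-VM directory named after the "%d-%d" (pid-fd)
 * dir_name format, e.g.:
 *
 *	/sys/kernel/debug/kvm/1234-11/<stat name>
 *
 * with one file per VM stat and per vCPU stat descriptor, created with mode
 * 0444 or 0644 as chosen by kvm_stats_debugfs_mode().
 */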
| 1023 | |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 1024 | /* |
| 1025 | * Called after the VM is otherwise initialized, but just before adding it to |
| 1026 | * the vm_list. |
| 1027 | */ |
| 1028 | int __weak kvm_arch_post_init_vm(struct kvm *kvm) |
| 1029 | { |
| 1030 | return 0; |
| 1031 | } |
| 1032 | |
| 1033 | /* |
| 1034 | * Called just after removing the VM from the vm_list, but before doing any |
| 1035 | * other destruction. |
| 1036 | */ |
| 1037 | void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm) |
| 1038 | { |
| 1039 | } |
| 1040 | |
Peter Xu | 3165af7 | 2021-07-30 18:04:49 -0400 | [diff] [blame] | 1041 | /* |
 | 1042 |  * Called after the per-VM debugfs is created. When called, kvm->debugfs_dentry 
 | 1043 |  * is already set up, so arch-specific debugfs entries can be created under it. 
 | 1044 |  * Cleanup is done automatically and recursively in kvm_destroy_vm_debugfs(), so 
 | 1045 |  * a per-arch destroy interface is not needed. 
| 1046 | */ |
| 1047 | int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm) |
| 1048 | { |
| 1049 | return 0; |
| 1050 | } |
| 1051 | |
Carsten Otte | e08b963 | 2012-01-04 10:25:20 +0100 | [diff] [blame] | 1052 | static struct kvm *kvm_create_vm(unsigned long type) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1053 | { |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 1054 | struct kvm *kvm = kvm_arch_alloc_vm(); |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1055 | struct kvm_memslots *slots; |
Jim Mattson | 9121923 | 2019-10-24 16:03:26 -0700 | [diff] [blame] | 1056 | int r = -ENOMEM; |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1057 | int i, j; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1058 | |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 1059 | if (!kvm) |
| 1060 | return ERR_PTR(-ENOMEM); |
| 1061 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 1062 | KVM_MMU_LOCK_INIT(kvm); |
Vegard Nossum | f1f1007 | 2017-02-27 14:30:07 -0800 | [diff] [blame] | 1063 | mmgrab(current->mm); |
Paolo Bonzini | e9ad4ec | 2016-03-21 10:15:25 +0100 | [diff] [blame] | 1064 | kvm->mm = current->mm; |
| 1065 | kvm_eventfd_init(kvm); |
| 1066 | mutex_init(&kvm->lock); |
| 1067 | mutex_init(&kvm->irq_lock); |
| 1068 | mutex_init(&kvm->slots_lock); |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1069 | mutex_init(&kvm->slots_arch_lock); |
Paolo Bonzini | 52ac8b3 | 2021-05-27 08:09:15 -0400 | [diff] [blame] | 1070 | spin_lock_init(&kvm->mn_invalidate_lock); |
| 1071 | rcuwait_init(&kvm->mn_memslots_update_rcuwait); |
Marc Zyngier | c5b0775 | 2021-11-16 16:04:01 +0000 | [diff] [blame] | 1072 | xa_init(&kvm->vcpu_array); |
Paolo Bonzini | 52ac8b3 | 2021-05-27 08:09:15 -0400 | [diff] [blame] | 1073 | |
David Woodhouse | 982ed0d | 2021-12-10 16:36:21 +0000 | [diff] [blame] | 1074 | INIT_LIST_HEAD(&kvm->gpc_list); |
| 1075 | spin_lock_init(&kvm->gpc_lock); |
Paolo Bonzini | e9ad4ec | 2016-03-21 10:15:25 +0100 | [diff] [blame] | 1076 | |
Paolo Bonzini | e9ad4ec | 2016-03-21 10:15:25 +0100 | [diff] [blame] | 1077 | INIT_LIST_HEAD(&kvm->devices); |
| 1078 | |
Jim Mattson | 9121923 | 2019-10-24 16:03:26 -0700 | [diff] [blame] | 1079 | BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); |
| 1080 | |
Paolo Bonzini | 8a44119 | 2019-11-04 12:16:49 +0100 | [diff] [blame] | 1081 | if (init_srcu_struct(&kvm->srcu)) |
| 1082 | goto out_err_no_srcu; |
| 1083 | if (init_srcu_struct(&kvm->irq_srcu)) |
| 1084 | goto out_err_no_irq_srcu; |
| 1085 | |
Paolo Bonzini | e2d3fca | 2019-11-04 13:23:53 +0100 | [diff] [blame] | 1086 | refcount_set(&kvm->users_count, 1); |
Jim Mattson | 9121923 | 2019-10-24 16:03:26 -0700 | [diff] [blame] | 1087 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1088 | for (j = 0; j < 2; j++) { |
| 1089 | slots = &kvm->__memslots[i][j]; |
Jim Mattson | 9121923 | 2019-10-24 16:03:26 -0700 | [diff] [blame] | 1090 | |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1091 | atomic_long_set(&slots->last_used_slot, (unsigned long)NULL); |
| 1092 | slots->hva_tree = RB_ROOT_CACHED; |
| 1093 | slots->gfn_tree = RB_ROOT; |
| 1094 | hash_init(slots->id_hash); |
| 1095 | slots->node_idx = j; |
| 1096 | |
| 1097 | /* Generations must be different for each address space. */ |
| 1098 | slots->generation = i; |
| 1099 | } |
| 1100 | |
| 1101 | rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]); |
Jim Mattson | 9121923 | 2019-10-24 16:03:26 -0700 | [diff] [blame] | 1102 | } |
| 1103 | |
| 1104 | for (i = 0; i < KVM_NR_BUSES; i++) { |
| 1105 | rcu_assign_pointer(kvm->buses[i], |
| 1106 | kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT)); |
| 1107 | if (!kvm->buses[i]) |
Jim Mattson | a97b0e7 | 2019-10-25 13:34:58 +0200 | [diff] [blame] | 1108 | goto out_err_no_arch_destroy_vm; |
Jim Mattson | 9121923 | 2019-10-24 16:03:26 -0700 | [diff] [blame] | 1109 | } |
| 1110 | |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 1111 | kvm->max_halt_poll_ns = halt_poll_ns; |
| 1112 | |
Carsten Otte | e08b963 | 2012-01-04 10:25:20 +0100 | [diff] [blame] | 1113 | r = kvm_arch_init_vm(kvm, type); |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 1114 | if (r) |
Jim Mattson | a97b0e7 | 2019-10-25 13:34:58 +0200 | [diff] [blame] | 1115 | goto out_err_no_arch_destroy_vm; |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 1116 | |
| 1117 | r = hardware_enable_all(); |
| 1118 | if (r) |
Christian Borntraeger | 719d93c | 2014-01-16 13:44:20 +0100 | [diff] [blame] | 1119 | goto out_err_no_disable; |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 1120 | |
Paolo Bonzini | c77dcac | 2014-08-06 14:24:45 +0200 | [diff] [blame] | 1121 | #ifdef CONFIG_HAVE_KVM_IRQFD |
Gleb Natapov | 136bdfe | 2009-08-24 11:54:23 +0300 | [diff] [blame] | 1122 | INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); |
Avi Kivity | 75858a8 | 2009-01-04 17:10:50 +0200 | [diff] [blame] | 1123 | #endif |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1124 | |
Mike Waychison | 74b5c5b | 2011-06-03 13:04:53 -0700 | [diff] [blame] | 1125 | r = kvm_init_mmu_notifier(kvm); |
| 1126 | if (r) |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 1127 | goto out_err_no_mmu_notifier; |
| 1128 | |
| 1129 | r = kvm_arch_post_init_vm(kvm); |
| 1130 | if (r) |
Mike Waychison | 74b5c5b | 2011-06-03 13:04:53 -0700 | [diff] [blame] | 1131 | goto out_err; |
| 1132 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 1133 | mutex_lock(&kvm_lock); |
Rusty Russell | 5e58cfe | 2007-07-23 17:08:21 +1000 | [diff] [blame] | 1134 | list_add(&kvm->vm_list, &vm_list); |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 1135 | mutex_unlock(&kvm_lock); |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 1136 | |
Peter Zijlstra | 2ecd9d2 | 2015-07-03 18:53:58 +0200 | [diff] [blame] | 1137 | preempt_notifier_inc(); |
Sergey Senozhatsky | 2fdef3a | 2021-06-06 11:10:44 +0900 | [diff] [blame] | 1138 | kvm_init_pm_notifier(kvm); |
Peter Zijlstra | 2ecd9d2 | 2015-07-03 18:53:58 +0200 | [diff] [blame] | 1139 | |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 1140 | return kvm; |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 1141 | |
| 1142 | out_err: |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 1143 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
| 1144 | if (kvm->mmu_notifier.ops) |
| 1145 | mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); |
| 1146 | #endif |
| 1147 | out_err_no_mmu_notifier: |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 1148 | hardware_disable_all(); |
Christian Borntraeger | 719d93c | 2014-01-16 13:44:20 +0100 | [diff] [blame] | 1149 | out_err_no_disable: |
Jim Mattson | a97b0e7 | 2019-10-25 13:34:58 +0200 | [diff] [blame] | 1150 | kvm_arch_destroy_vm(kvm); |
Jim Mattson | a97b0e7 | 2019-10-25 13:34:58 +0200 | [diff] [blame] | 1151 | out_err_no_arch_destroy_vm: |
Paolo Bonzini | e2d3fca | 2019-11-04 13:23:53 +0100 | [diff] [blame] | 1152 | WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 1153 | for (i = 0; i < KVM_NR_BUSES; i++) |
Paolo Bonzini | 3898da9 | 2017-08-02 17:55:54 +0200 | [diff] [blame] | 1154 | kfree(kvm_get_bus(kvm, i)); |
Paolo Bonzini | 8a44119 | 2019-11-04 12:16:49 +0100 | [diff] [blame] | 1155 | cleanup_srcu_struct(&kvm->irq_srcu); |
| 1156 | out_err_no_irq_srcu: |
| 1157 | cleanup_srcu_struct(&kvm->srcu); |
| 1158 | out_err_no_srcu: |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 1159 | kvm_arch_free_vm(kvm); |
Paolo Bonzini | e9ad4ec | 2016-03-21 10:15:25 +0100 | [diff] [blame] | 1160 | mmdrop(current->mm); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 1161 | return ERR_PTR(r); |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 1162 | } |
| 1163 | |
Scott Wood | 07f0a7b | 2013-04-25 14:11:23 +0000 | [diff] [blame] | 1164 | static void kvm_destroy_devices(struct kvm *kvm) |
| 1165 | { |
Geliang Tang | e6e3b5a | 2016-01-01 19:47:12 +0800 | [diff] [blame] | 1166 | struct kvm_device *dev, *tmp; |
Scott Wood | 07f0a7b | 2013-04-25 14:11:23 +0000 | [diff] [blame] | 1167 | |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 1168 | /* |
| 1169 | * We do not need to take the kvm->lock here, because nobody else |
| 1170 | * has a reference to the struct kvm at this point and therefore |
| 1171 | * cannot access the devices list anyhow. |
| 1172 | */ |
Geliang Tang | e6e3b5a | 2016-01-01 19:47:12 +0800 | [diff] [blame] | 1173 | list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { |
| 1174 | list_del(&dev->vm_node); |
Scott Wood | 07f0a7b | 2013-04-25 14:11:23 +0000 | [diff] [blame] | 1175 | dev->ops->destroy(dev); |
| 1176 | } |
| 1177 | } |
| 1178 | |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 1179 | static void kvm_destroy_vm(struct kvm *kvm) |
| 1180 | { |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 1181 | int i; |
Avi Kivity | 6d4e4c4 | 2007-11-21 16:41:05 +0200 | [diff] [blame] | 1182 | struct mm_struct *mm = kvm->mm; |
| 1183 | |
Sergey Senozhatsky | 2fdef3a | 2021-06-06 11:10:44 +0900 | [diff] [blame] | 1184 | kvm_destroy_pm_notifier(kvm); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 1185 | kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 1186 | kvm_destroy_vm_debugfs(kvm); |
Sheng Yang | ad8ba2c | 2009-01-06 10:03:02 +0800 | [diff] [blame] | 1187 | kvm_arch_sync_events(kvm); |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 1188 | mutex_lock(&kvm_lock); |
Avi Kivity | 133de90 | 2007-02-12 00:54:44 -0800 | [diff] [blame] | 1189 | list_del(&kvm->vm_list); |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 1190 | mutex_unlock(&kvm_lock); |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 1191 | kvm_arch_pre_destroy_vm(kvm); |
| 1192 | |
Avi Kivity | 399ec80 | 2008-11-19 13:58:46 +0200 | [diff] [blame] | 1193 | kvm_free_irq_routing(kvm); |
Peter Xu | df630b8 | 2017-03-15 16:01:17 +0800 | [diff] [blame] | 1194 | for (i = 0; i < KVM_NR_BUSES; i++) { |
Paolo Bonzini | 3898da9 | 2017-08-02 17:55:54 +0200 | [diff] [blame] | 1195 | struct kvm_io_bus *bus = kvm_get_bus(kvm, i); |
Christian Borntraeger | 4a12f95 | 2017-07-07 10:51:38 +0200 | [diff] [blame] | 1196 | |
Christian Borntraeger | 4a12f95 | 2017-07-07 10:51:38 +0200 | [diff] [blame] | 1197 | if (bus) |
| 1198 | kvm_io_bus_destroy(bus); |
Peter Xu | df630b8 | 2017-03-15 16:01:17 +0800 | [diff] [blame] | 1199 | kvm->buses[i] = NULL; |
| 1200 | } |
Avi Kivity | 980da6c | 2009-12-20 15:13:43 +0200 | [diff] [blame] | 1201 | kvm_coalesced_mmio_free(kvm); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1202 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
| 1203 | mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); |
Paolo Bonzini | 52ac8b3 | 2021-05-27 08:09:15 -0400 | [diff] [blame] | 1204 | /* |
| 1205 | * At this point, pending calls to invalidate_range_start() |
| 1206 | * have completed but no more MMU notifiers will run, so |
| 1207 | * mn_active_invalidate_count may remain unbalanced. |
 | 1208 |  * No threads can be waiting in kvm_swap_active_memslots() as the 
| 1209 | * last reference on KVM has been dropped, but freeing |
| 1210 | * memslots would deadlock without this manual intervention. |
| 1211 | */ |
| 1212 | WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait)); |
| 1213 | kvm->mn_active_invalidate_count = 0; |
Gleb Natapov | f00be0c | 2009-03-19 12:20:36 +0200 | [diff] [blame] | 1214 | #else |
Marcelo Tosatti | 2df72e9 | 2012-08-24 15:54:57 -0300 | [diff] [blame] | 1215 | kvm_arch_flush_shadow_all(kvm); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1216 | #endif |
Zhang Xiantao | d19a9cd | 2007-11-18 18:43:45 +0800 | [diff] [blame] | 1217 | kvm_arch_destroy_vm(kvm); |
Scott Wood | 07f0a7b | 2013-04-25 14:11:23 +0000 | [diff] [blame] | 1218 | kvm_destroy_devices(kvm); |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1219 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { |
| 1220 | kvm_free_memslots(kvm, &kvm->__memslots[i][0]); |
| 1221 | kvm_free_memslots(kvm, &kvm->__memslots[i][1]); |
| 1222 | } |
Paolo Bonzini | 820b3fc | 2014-06-03 13:44:17 +0200 | [diff] [blame] | 1223 | cleanup_srcu_struct(&kvm->irq_srcu); |
Jan Kiszka | d89f5ef | 2010-11-09 17:02:49 +0100 | [diff] [blame] | 1224 | cleanup_srcu_struct(&kvm->srcu); |
| 1225 | kvm_arch_free_vm(kvm); |
Peter Zijlstra | 2ecd9d2 | 2015-07-03 18:53:58 +0200 | [diff] [blame] | 1226 | preempt_notifier_dec(); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 1227 | hardware_disable_all(); |
Avi Kivity | 6d4e4c4 | 2007-11-21 16:41:05 +0200 | [diff] [blame] | 1228 | mmdrop(mm); |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 1229 | } |
| 1230 | |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 1231 | void kvm_get_kvm(struct kvm *kvm) |
| 1232 | { |
Elena Reshetova | e3736c3 | 2017-02-20 13:06:21 +0200 | [diff] [blame] | 1233 | refcount_inc(&kvm->users_count); |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 1234 | } |
| 1235 | EXPORT_SYMBOL_GPL(kvm_get_kvm); |
| 1236 | |
Peter Xu | 605c713 | 2021-06-25 11:32:07 -0400 | [diff] [blame] | 1237 | /* |
 | 1238 |  * Make sure the vm is not in the middle of destruction, i.e. a safe version of 
 | 1239 |  * kvm_get_kvm(). Return true if kvm was referenced successfully, false otherwise. 
| 1240 | */ |
| 1241 | bool kvm_get_kvm_safe(struct kvm *kvm) |
| 1242 | { |
| 1243 | return refcount_inc_not_zero(&kvm->users_count); |
| 1244 | } |
| 1245 | EXPORT_SYMBOL_GPL(kvm_get_kvm_safe); |
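
/*
 * Usage sketch (illustrative): a caller that may race with VM destruction,
 * e.g. an async worker holding only a weak reference, would do:
 *
 *	if (!kvm_get_kvm_safe(kvm))
 *		return;		(VM is being destroyed, users_count already 0)
 *	...
 *	kvm_put_kvm(kvm);
 */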
| 1246 | |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 1247 | void kvm_put_kvm(struct kvm *kvm) |
| 1248 | { |
Elena Reshetova | e3736c3 | 2017-02-20 13:06:21 +0200 | [diff] [blame] | 1249 | if (refcount_dec_and_test(&kvm->users_count)) |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 1250 | kvm_destroy_vm(kvm); |
| 1251 | } |
| 1252 | EXPORT_SYMBOL_GPL(kvm_put_kvm); |
| 1253 | |
Sean Christopherson | 149487b | 2019-10-21 15:58:42 -0700 | [diff] [blame] | 1254 | /* |
| 1255 | * Used to put a reference that was taken on behalf of an object associated |
| 1256 | * with a user-visible file descriptor, e.g. a vcpu or device, if installation |
| 1257 | * of the new file descriptor fails and the reference cannot be transferred to |
| 1258 | * its final owner. In such cases, the caller is still actively using @kvm and |
| 1259 | * will fail miserably if the refcount unexpectedly hits zero. |
| 1260 | */ |
| 1261 | void kvm_put_kvm_no_destroy(struct kvm *kvm) |
| 1262 | { |
| 1263 | WARN_ON(refcount_dec_and_test(&kvm->users_count)); |
| 1264 | } |
| 1265 | EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy); |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 1266 | |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 1267 | static int kvm_vm_release(struct inode *inode, struct file *filp) |
| 1268 | { |
| 1269 | struct kvm *kvm = filp->private_data; |
| 1270 | |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 1271 | kvm_irqfd_release(kvm); |
| 1272 | |
Izik Eidus | d39f13b | 2008-03-30 16:01:25 +0300 | [diff] [blame] | 1273 | kvm_put_kvm(kvm); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1274 | return 0; |
| 1275 | } |
| 1276 | |
Takuya Yoshikawa | 515a012 | 2010-10-27 18:23:54 +0900 | [diff] [blame] | 1277 | /* |
| 1278 | * Allocation size is twice as large as the actual dirty bitmap size. |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1279 | * See kvm_vm_ioctl_get_dirty_log() why this is needed. |
Takuya Yoshikawa | 515a012 | 2010-10-27 18:23:54 +0900 | [diff] [blame] | 1280 | */ |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 1281 | static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) |
Takuya Yoshikawa | a36a57b1 | 2010-10-27 18:22:19 +0900 | [diff] [blame] | 1282 | { |
Takuya Yoshikawa | 515a012 | 2010-10-27 18:23:54 +0900 | [diff] [blame] | 1283 | unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); |
Takuya Yoshikawa | a36a57b1 | 2010-10-27 18:22:19 +0900 | [diff] [blame] | 1284 | |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 1285 | memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT); |
Takuya Yoshikawa | a36a57b1 | 2010-10-27 18:22:19 +0900 | [diff] [blame] | 1286 | if (!memslot->dirty_bitmap) |
| 1287 | return -ENOMEM; |
| 1288 | |
Takuya Yoshikawa | a36a57b1 | 2010-10-27 18:22:19 +0900 | [diff] [blame] | 1289 | return 0; |
| 1290 | } |
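
/*
 * Worked example (informational): for a 1 GiB slot of 4 KiB pages,
 * npages = 262144, kvm_dirty_bitmap_bytes() is 32 KiB, and the allocation
 * above is therefore 64 KiB; the second half is the scratch buffer used by
 * kvm_vm_ioctl_get_dirty_log().
 */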
| 1291 | |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1292 | static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id) |
Xiao Guangrong | bf3e05b | 2011-11-24 17:40:57 +0800 | [diff] [blame] | 1293 | { |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1294 | struct kvm_memslots *active = __kvm_memslots(kvm, as_id); |
| 1295 | int node_idx_inactive = active->node_idx ^ 1; |
Xiao Guangrong | f85e2cb | 2011-11-24 17:41:54 +0800 | [diff] [blame] | 1296 | |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1297 | return &kvm->__memslots[as_id][node_idx_inactive]; |
| 1298 | } |
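
/*
 * Example (informational): if the active set for @as_id is
 * &kvm->__memslots[as_id][0] (node_idx == 0), the XOR above yields
 * &kvm->__memslots[as_id][1], and vice versa.
 */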
Igor Mammedov | 0e60b07 | 2014-12-01 17:29:26 +0000 | [diff] [blame] | 1299 | |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1300 | /* |
 | 1301 |  * Helper to get the address space ID when one of the memslot pointers may be NULL. 
 | 1302 |  * This also serves as a sanity check that at least one of the pointers is non-NULL, 
 | 1303 |  * and that their address space IDs don't diverge. 
| 1304 | */ |
| 1305 | static int kvm_memslots_get_as_id(struct kvm_memory_slot *a, |
| 1306 | struct kvm_memory_slot *b) |
| 1307 | { |
| 1308 | if (WARN_ON_ONCE(!a && !b)) |
| 1309 | return 0; |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1310 | |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1311 | if (!a) |
| 1312 | return b->as_id; |
| 1313 | if (!b) |
| 1314 | return a->as_id; |
Sean Christopherson | 0774a96 | 2020-03-20 13:55:40 -0700 | [diff] [blame] | 1315 | |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1316 | WARN_ON_ONCE(a->as_id != b->as_id); |
| 1317 | return a->as_id; |
| 1318 | } |
| 1319 | |
| 1320 | static void kvm_insert_gfn_node(struct kvm_memslots *slots, |
| 1321 | struct kvm_memory_slot *slot) |
| 1322 | { |
| 1323 | struct rb_root *gfn_tree = &slots->gfn_tree; |
| 1324 | struct rb_node **node, *parent; |
| 1325 | int idx = slots->node_idx; |
| 1326 | |
| 1327 | parent = NULL; |
| 1328 | for (node = &gfn_tree->rb_node; *node; ) { |
| 1329 | struct kvm_memory_slot *tmp; |
| 1330 | |
| 1331 | tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]); |
| 1332 | parent = *node; |
| 1333 | if (slot->base_gfn < tmp->base_gfn) |
| 1334 | node = &(*node)->rb_left; |
| 1335 | else if (slot->base_gfn > tmp->base_gfn) |
| 1336 | node = &(*node)->rb_right; |
| 1337 | else |
| 1338 | BUG(); |
Igor Mammedov | 7f379cf | 2014-12-01 17:29:24 +0000 | [diff] [blame] | 1339 | } |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1340 | |
| 1341 | rb_link_node(&slot->gfn_node[idx], parent, node); |
| 1342 | rb_insert_color(&slot->gfn_node[idx], gfn_tree); |
| 1343 | } |
| 1344 | |
| 1345 | static void kvm_erase_gfn_node(struct kvm_memslots *slots, |
| 1346 | struct kvm_memory_slot *slot) |
| 1347 | { |
| 1348 | rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree); |
| 1349 | } |
| 1350 | |
| 1351 | static void kvm_replace_gfn_node(struct kvm_memslots *slots, |
| 1352 | struct kvm_memory_slot *old, |
| 1353 | struct kvm_memory_slot *new) |
| 1354 | { |
| 1355 | int idx = slots->node_idx; |
| 1356 | |
| 1357 | WARN_ON_ONCE(old->base_gfn != new->base_gfn); |
| 1358 | |
| 1359 | rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx], |
| 1360 | &slots->gfn_tree); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1361 | } |
| 1362 | |
| 1363 | /* |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1364 | * Replace @old with @new in the inactive memslots. |
| 1365 | * |
| 1366 | * With NULL @old this simply adds @new. |
| 1367 | * With NULL @new this simply removes @old. |
| 1368 | * |
 | 1369 |  * If @new is non-NULL, its userspace_addr and npages must be set so that 
 | 1370 |  * its hva_node[] range can be initialized below. 
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1371 | */ |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1372 | static void kvm_replace_memslot(struct kvm *kvm, |
Maciej S. Szmigiero | 26b8345 | 2021-12-06 20:54:27 +0100 | [diff] [blame] | 1373 | struct kvm_memory_slot *old, |
| 1374 | struct kvm_memory_slot *new) |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1375 | { |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1376 | int as_id = kvm_memslots_get_as_id(old, new); |
| 1377 | struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); |
| 1378 | int idx = slots->node_idx; |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1379 | |
Maciej S. Szmigiero | 26b8345 | 2021-12-06 20:54:27 +0100 | [diff] [blame] | 1380 | if (old) { |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1381 | hash_del(&old->id_node[idx]); |
| 1382 | interval_tree_remove(&old->hva_node[idx], &slots->hva_tree); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1383 | |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1384 | if ((long)old == atomic_long_read(&slots->last_used_slot)) |
| 1385 | atomic_long_set(&slots->last_used_slot, (long)new); |
| 1386 | |
| 1387 | if (!new) { |
| 1388 | kvm_erase_gfn_node(slots, old); |
Maciej S. Szmigiero | 26b8345 | 2021-12-06 20:54:27 +0100 | [diff] [blame] | 1389 | return; |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1390 | } |
| 1391 | } |
Paolo Bonzini | efbeec7 | 2014-12-27 18:01:00 +0100 | [diff] [blame] | 1392 | |
| 1393 | /* |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1394 | * Initialize @new's hva range. Do this even when replacing an @old |
 | 1395 |  * slot, as kvm_copy_memslot() deliberately does not touch node data. 
Paolo Bonzini | efbeec7 | 2014-12-27 18:01:00 +0100 | [diff] [blame] | 1396 | */ |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1397 | new->hva_node[idx].start = new->userspace_addr; |
| 1398 | new->hva_node[idx].last = new->userspace_addr + |
| 1399 | (new->npages << PAGE_SHIFT) - 1; |
Xiao Guangrong | f85e2cb | 2011-11-24 17:41:54 +0800 | [diff] [blame] | 1400 | |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1401 | /* |
 | 1402 |  * (Re)Add the new memslot. There is no O(1) interval_tree_replace(), so 
 | 1403 |  * hva_node needs to be swapped with remove+insert even though the hva can't 
 | 1404 |  * change when replacing an existing slot. 
| 1405 | */ |
| 1406 | hash_add(slots->id_hash, &new->id_node[idx], new->id); |
| 1407 | interval_tree_insert(&new->hva_node[idx], &slots->hva_tree); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1408 | |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1409 | /* |
| 1410 | * If the memslot gfn is unchanged, rb_replace_node() can be used to |
| 1411 | * switch the node in the gfn tree instead of removing the old and |
| 1412 | * inserting the new as two separate operations. Replacement is a |
| 1413 | * single O(1) operation versus two O(log(n)) operations for |
| 1414 | * remove+insert. |
| 1415 | */ |
| 1416 | if (old && old->base_gfn == new->base_gfn) { |
| 1417 | kvm_replace_gfn_node(slots, old, new); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1418 | } else { |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1419 | if (old) |
| 1420 | kvm_erase_gfn_node(slots, old); |
| 1421 | kvm_insert_gfn_node(slots, new); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1422 | } |
Xiao Guangrong | bf3e05b | 2011-11-24 17:40:57 +0800 | [diff] [blame] | 1423 | } |
| 1424 | |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1425 | static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem) |
Xiao Guangrong | a50d64d | 2012-08-21 10:58:13 +0800 | [diff] [blame] | 1426 | { |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 1427 | u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; |
| 1428 | |
Christoffer Dall | 0f8a4de | 2014-08-26 14:00:37 +0200 | [diff] [blame] | 1429 | #ifdef __KVM_HAVE_READONLY_MEM |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 1430 | valid_flags |= KVM_MEM_READONLY; |
| 1431 | #endif |
| 1432 | |
| 1433 | if (mem->flags & ~valid_flags) |
Xiao Guangrong | a50d64d | 2012-08-21 10:58:13 +0800 | [diff] [blame] | 1434 | return -EINVAL; |
| 1435 | |
| 1436 | return 0; |
| 1437 | } |
| 1438 | |
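/*
 * Userspace-side sketch (illustrative, not kernel code): the flags checked
 * above arrive via the KVM_SET_USER_MEMORY_REGION ioctl, e.g.:
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot = 0,
 *		.flags = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0x100000,
 *		.memory_size = 0x200000,
 *		.userspace_addr = (__u64)backing,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 *
 * where "backing" is a page-aligned buffer obtained from mmap().
 */
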
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1439 | static void kvm_swap_active_memslots(struct kvm *kvm, int as_id) |
Gleb Natapov | 7ec4fb4 | 2012-12-24 17:49:30 +0200 | [diff] [blame] | 1440 | { |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1441 | struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); |
| 1442 | |
| 1443 | /* Grab the generation from the activate memslots. */ |
| 1444 | u64 gen = __kvm_memslots(kvm, as_id)->generation; |
Gleb Natapov | 7ec4fb4 | 2012-12-24 17:49:30 +0200 | [diff] [blame] | 1445 | |
Sean Christopherson | 361209e | 2019-02-05 13:01:14 -0800 | [diff] [blame] | 1446 | WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); |
| 1447 | slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; |
David Matlack | ee3d157 | 2014-08-18 15:46:06 -0700 | [diff] [blame] | 1448 | |
Paolo Bonzini | 52ac8b3 | 2021-05-27 08:09:15 -0400 | [diff] [blame] | 1449 | /* |
| 1450 | * Do not store the new memslots while there are invalidations in |
Paolo Bonzini | 071064f | 2021-08-03 03:45:41 -0400 | [diff] [blame] | 1451 | * progress, otherwise the locking in invalidate_range_start and |
| 1452 | * invalidate_range_end will be unbalanced. |
Paolo Bonzini | 52ac8b3 | 2021-05-27 08:09:15 -0400 | [diff] [blame] | 1453 | */ |
| 1454 | spin_lock(&kvm->mn_invalidate_lock); |
| 1455 | prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait); |
| 1456 | while (kvm->mn_active_invalidate_count) { |
| 1457 | set_current_state(TASK_UNINTERRUPTIBLE); |
| 1458 | spin_unlock(&kvm->mn_invalidate_lock); |
| 1459 | schedule(); |
| 1460 | spin_lock(&kvm->mn_invalidate_lock); |
| 1461 | } |
| 1462 | finish_rcuwait(&kvm->mn_memslots_update_rcuwait); |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1463 | rcu_assign_pointer(kvm->memslots[as_id], slots); |
Paolo Bonzini | 52ac8b3 | 2021-05-27 08:09:15 -0400 | [diff] [blame] | 1464 | spin_unlock(&kvm->mn_invalidate_lock); |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1465 | |
| 1466 | /* |
 | 1467 |  * Acquired in kvm_set_memslot(). Must be released before the SRCU 
 | 1468 |  * synchronization below in order to avoid deadlock with another thread 
| 1469 | * acquiring the slots_arch_lock in an srcu critical section. |
| 1470 | */ |
| 1471 | mutex_unlock(&kvm->slots_arch_lock); |
| 1472 | |
Gleb Natapov | 7ec4fb4 | 2012-12-24 17:49:30 +0200 | [diff] [blame] | 1473 | synchronize_srcu_expedited(&kvm->srcu); |
Takuya Yoshikawa | e59dbe0 | 2013-07-04 13:40:29 +0900 | [diff] [blame] | 1474 | |
David Matlack | ee3d157 | 2014-08-18 15:46:06 -0700 | [diff] [blame] | 1475 | /* |
Sean Christopherson | 361209e | 2019-02-05 13:01:14 -0800 | [diff] [blame] | 1476 | * Increment the new memslot generation a second time, dropping the |
Miaohe Lin | 0011679 | 2019-12-11 14:26:23 +0800 | [diff] [blame] | 1477 | * update in-progress flag and incrementing the generation based on |
Sean Christopherson | 361209e | 2019-02-05 13:01:14 -0800 | [diff] [blame] | 1478 | * the number of address spaces. This provides a unique and easily |
| 1479 | * identifiable generation number while the memslots are in flux. |
| 1480 | */ |
| 1481 | gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; |
| 1482 | |
| 1483 | /* |
Paolo Bonzini | 4bd518f | 2017-02-03 20:44:51 -0800 | [diff] [blame] | 1484 | * Generations must be unique even across address spaces. We do not need |
| 1485 | * a global counter for that, instead the generation space is evenly split |
| 1486 | * across address spaces. For example, with two address spaces, address |
Sean Christopherson | 164bf7e | 2019-02-05 13:01:18 -0800 | [diff] [blame] | 1487 | * space 0 will use generations 0, 2, 4, ... while address space 1 will |
| 1488 | * use generations 1, 3, 5, ... |
David Matlack | ee3d157 | 2014-08-18 15:46:06 -0700 | [diff] [blame] | 1489 | */ |
Sean Christopherson | 164bf7e | 2019-02-05 13:01:18 -0800 | [diff] [blame] | 1490 | gen += KVM_ADDRESS_SPACE_NUM; |
David Matlack | ee3d157 | 2014-08-18 15:46:06 -0700 | [diff] [blame] | 1491 | |
Sean Christopherson | 1524825 | 2019-02-05 12:54:17 -0800 | [diff] [blame] | 1492 | kvm_arch_memslots_updated(kvm, gen); |
| 1493 | |
| 1494 | slots->generation = gen; |
Gleb Natapov | 7ec4fb4 | 2012-12-24 17:49:30 +0200 | [diff] [blame] | 1495 | } |
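
/*
 * Worked example (informational): with KVM_ADDRESS_SPACE_NUM == 2, address
 * space 0 starts at generation 0; an update publishes
 * 0 | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS during the swap and settles at
 * 0 + 2 = 2, the next one at 4, while address space 1 uses 1, 3, 5, ...
 */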
| 1496 | |
Sean Christopherson | 0792166 | 2021-12-06 20:54:19 +0100 | [diff] [blame] | 1497 | static int kvm_prepare_memory_region(struct kvm *kvm, |
| 1498 | const struct kvm_memory_slot *old, |
| 1499 | struct kvm_memory_slot *new, |
| 1500 | enum kvm_mr_change change) |
Ben Gardon | ddc12f2 | 2021-05-18 10:34:10 -0700 | [diff] [blame] | 1501 | { |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1502 | int r; |
| 1503 | |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1504 | /* |
Sean Christopherson | 0792166 | 2021-12-06 20:54:19 +0100 | [diff] [blame] | 1505 | * If dirty logging is disabled, nullify the bitmap; the old bitmap |
| 1506 | * will be freed on "commit". If logging is enabled in both old and |
| 1507 | * new, reuse the existing bitmap. If logging is enabled only in the |
| 1508 | * new and KVM isn't using a ring buffer, allocate and initialize a |
| 1509 | * new bitmap. |
| 1510 | */ |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1511 | if (change != KVM_MR_DELETE) { |
| 1512 | if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) |
| 1513 | new->dirty_bitmap = NULL; |
| 1514 | else if (old && old->dirty_bitmap) |
| 1515 | new->dirty_bitmap = old->dirty_bitmap; |
| 1516 | else if (!kvm->dirty_ring_size) { |
| 1517 | r = kvm_alloc_dirty_bitmap(new); |
| 1518 | if (r) |
| 1519 | return r; |
Sean Christopherson | 0792166 | 2021-12-06 20:54:19 +0100 | [diff] [blame] | 1520 | |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1521 | if (kvm_dirty_log_manual_protect_and_init_set(kvm)) |
| 1522 | bitmap_set(new->dirty_bitmap, 0, new->npages); |
| 1523 | } |
Sean Christopherson | 0792166 | 2021-12-06 20:54:19 +0100 | [diff] [blame] | 1524 | } |
| 1525 | |
| 1526 | r = kvm_arch_prepare_memory_region(kvm, old, new, change); |
| 1527 | |
| 1528 | /* Free the bitmap on failure if it was allocated above. */ |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1529 | if (r && new && new->dirty_bitmap && old && !old->dirty_bitmap) |
Sean Christopherson | 0792166 | 2021-12-06 20:54:19 +0100 | [diff] [blame] | 1530 | kvm_destroy_dirty_bitmap(new); |
| 1531 | |
| 1532 | return r; |
| 1533 | } |
| 1534 | |
| 1535 | static void kvm_commit_memory_region(struct kvm *kvm, |
| 1536 | struct kvm_memory_slot *old, |
| 1537 | const struct kvm_memory_slot *new, |
| 1538 | enum kvm_mr_change change) |
| 1539 | { |
| 1540 | /* |
| 1541 | * Update the total number of memslot pages before calling the arch |
| 1542 | * hook so that architectures can consume the result directly. |
| 1543 | */ |
| 1544 | if (change == KVM_MR_DELETE) |
| 1545 | kvm->nr_memslot_pages -= old->npages; |
| 1546 | else if (change == KVM_MR_CREATE) |
| 1547 | kvm->nr_memslot_pages += new->npages; |
| 1548 | |
| 1549 | kvm_arch_commit_memory_region(kvm, old, new, change); |
| 1550 | |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1551 | switch (change) { |
| 1552 | case KVM_MR_CREATE: |
| 1553 | /* Nothing more to do. */ |
| 1554 | break; |
| 1555 | case KVM_MR_DELETE: |
| 1556 | /* Free the old memslot and all its metadata. */ |
Sean Christopherson | 0792166 | 2021-12-06 20:54:19 +0100 | [diff] [blame] | 1557 | kvm_free_memslot(kvm, old); |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1558 | break; |
| 1559 | case KVM_MR_MOVE: |
| 1560 | case KVM_MR_FLAGS_ONLY: |
| 1561 | /* |
| 1562 | * Free the dirty bitmap as needed; the below check encompasses |
 | 1563 |  * both the flags and whether a ring buffer is being used. 
| 1564 | */ |
| 1565 | if (old->dirty_bitmap && !new->dirty_bitmap) |
| 1566 | kvm_destroy_dirty_bitmap(old); |
| 1567 | |
| 1568 | /* |
| 1569 | * The final quirk. Free the detached, old slot, but only its |
| 1570 | * memory, not any metadata. Metadata, including arch specific |
| 1571 | * data, may be reused by @new. |
| 1572 | */ |
| 1573 | kfree(old); |
| 1574 | break; |
| 1575 | default: |
| 1576 | BUG(); |
| 1577 | } |
| 1578 | } |
| 1579 | |
| 1580 | /* |
| 1581 | * Activate @new, which must be installed in the inactive slots by the caller, |
| 1582 | * by swapping the active slots and then propagating @new to @old once @old is |
| 1583 | * unreachable and can be safely modified. |
| 1584 | * |
| 1585 | * With NULL @old this simply adds @new to @active (while swapping the sets). |
| 1586 | * With NULL @new this simply removes @old from @active and frees it |
| 1587 | * (while also swapping the sets). |
| 1588 | */ |
| 1589 | static void kvm_activate_memslot(struct kvm *kvm, |
| 1590 | struct kvm_memory_slot *old, |
| 1591 | struct kvm_memory_slot *new) |
| 1592 | { |
| 1593 | int as_id = kvm_memslots_get_as_id(old, new); |
| 1594 | |
| 1595 | kvm_swap_active_memslots(kvm, as_id); |
| 1596 | |
| 1597 | /* Propagate the new memslot to the now inactive memslots. */ |
| 1598 | kvm_replace_memslot(kvm, old, new); |
| 1599 | } |
| 1600 | |
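/*
 * Note (informational): this deliberately copies only the slot's payload
 * fields; the tree linkage (hva_node[], id_node[], gfn_node[]) is left
 * untouched so the copy can be linked into a memslot set independently
 * of @src.
 */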
| 1601 | static void kvm_copy_memslot(struct kvm_memory_slot *dest, |
| 1602 | const struct kvm_memory_slot *src) |
| 1603 | { |
| 1604 | dest->base_gfn = src->base_gfn; |
| 1605 | dest->npages = src->npages; |
| 1606 | dest->dirty_bitmap = src->dirty_bitmap; |
| 1607 | dest->arch = src->arch; |
| 1608 | dest->userspace_addr = src->userspace_addr; |
| 1609 | dest->flags = src->flags; |
| 1610 | dest->id = src->id; |
| 1611 | dest->as_id = src->as_id; |
| 1612 | } |
| 1613 | |
| 1614 | static void kvm_invalidate_memslot(struct kvm *kvm, |
| 1615 | struct kvm_memory_slot *old, |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1616 | struct kvm_memory_slot *invalid_slot) |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1617 | { |
| 1618 | /* |
| 1619 | * Mark the current slot INVALID. As with all memslot modifications, |
| 1620 | * this must be done on an unreachable slot to avoid modifying the |
| 1621 | * current slot in the active tree. |
| 1622 | */ |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1623 | kvm_copy_memslot(invalid_slot, old); |
| 1624 | invalid_slot->flags |= KVM_MEMSLOT_INVALID; |
| 1625 | kvm_replace_memslot(kvm, old, invalid_slot); |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1626 | |
| 1627 | /* |
| 1628 | * Activate the slot that is now marked INVALID, but don't propagate |
| 1629 | * the slot to the now inactive slots. The slot is either going to be |
| 1630 | * deleted or recreated as a new slot. |
| 1631 | */ |
| 1632 | kvm_swap_active_memslots(kvm, old->as_id); |
| 1633 | |
| 1634 | /* |
| 1635 | * From this point no new shadow pages pointing to a deleted, or moved, |
| 1636 | * memslot will be created. Validation of sp->gfn happens in: |
| 1637 | * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) |
| 1638 | * - kvm_is_visible_gfn (mmu_check_root) |
| 1639 | */ |
Maciej S. Szmigiero | bcb63dc | 2021-12-06 20:54:31 +0100 | [diff] [blame] | 1640 | kvm_arch_flush_shadow_memslot(kvm, old); |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1641 | |
| 1642 | /* Was released by kvm_swap_active_memslots, reacquire. */ |
| 1643 | mutex_lock(&kvm->slots_arch_lock); |
| 1644 | |
| 1645 | /* |
| 1646 | * Copy the arch-specific field of the newly-installed slot back to the |
| 1647 | * old slot as the arch data could have changed between releasing |
 | 1648 |  * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock 
| 1649 | * above. Writers are required to retrieve memslots *after* acquiring |
| 1650 | * slots_arch_lock, thus the active slot's data is guaranteed to be fresh. |
| 1651 | */ |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1652 | old->arch = invalid_slot->arch; |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1653 | } |
| 1654 | |
| 1655 | static void kvm_create_memslot(struct kvm *kvm, |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1656 | struct kvm_memory_slot *new) |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1657 | { |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1658 | /* Add the new memslot to the inactive set and activate. */ |
| 1659 | kvm_replace_memslot(kvm, NULL, new); |
| 1660 | kvm_activate_memslot(kvm, NULL, new); |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1661 | } |
| 1662 | |
| 1663 | static void kvm_delete_memslot(struct kvm *kvm, |
| 1664 | struct kvm_memory_slot *old, |
| 1665 | struct kvm_memory_slot *invalid_slot) |
| 1666 | { |
| 1667 | /* |
 | 1668 |  * Remove the old memslot from the inactive memslots by passing NULL as 
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1669 |  * the "new" slot, then do the same for the invalid version in the active slots. 
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1670 | */ |
| 1671 | kvm_replace_memslot(kvm, old, NULL); |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1672 | kvm_activate_memslot(kvm, invalid_slot, NULL); |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1673 | } |
| 1674 | |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1675 | static void kvm_move_memslot(struct kvm *kvm, |
| 1676 | struct kvm_memory_slot *old, |
| 1677 | struct kvm_memory_slot *new, |
| 1678 | struct kvm_memory_slot *invalid_slot) |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1679 | { |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1680 | /* |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1681 | * Replace the old memslot in the inactive slots, and then swap slots |
| 1682 | * and replace the current INVALID with the new as well. |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1683 | */ |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1684 | kvm_replace_memslot(kvm, old, new); |
| 1685 | kvm_activate_memslot(kvm, invalid_slot, new); |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1686 | } |
| 1687 | |
| 1688 | static void kvm_update_flags_memslot(struct kvm *kvm, |
| 1689 | struct kvm_memory_slot *old, |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1690 | struct kvm_memory_slot *new) |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1691 | { |
| 1692 | /* |
| 1693 | * Similar to the MOVE case, but the slot doesn't need to be zapped as |
| 1694 | * an intermediate step. Instead, the old memslot is simply replaced |
| 1695 | * with a new, updated copy in both memslot sets. |
| 1696 | */ |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1697 | kvm_replace_memslot(kvm, old, new); |
| 1698 | kvm_activate_memslot(kvm, old, new); |
Sean Christopherson | 0792166 | 2021-12-06 20:54:19 +0100 | [diff] [blame] | 1699 | } |
| 1700 | |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1701 | static int kvm_set_memslot(struct kvm *kvm, |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1702 | struct kvm_memory_slot *old, |
Sean Christopherson | ce5f021 | 2021-12-06 20:54:10 +0100 | [diff] [blame] | 1703 | struct kvm_memory_slot *new, |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1704 | enum kvm_mr_change change) |
| 1705 | { |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1706 | struct kvm_memory_slot *invalid_slot; |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1707 | int r; |
| 1708 | |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1709 | /* |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1710 | * Released in kvm_swap_active_memslots. |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1711 | * |
| 1712 | * Must be held from before the current memslots are copied until |
| 1713 | * after the new memslots are installed with rcu_assign_pointer, |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1714 | * then released before the synchronize srcu in kvm_swap_active_memslots. |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1715 | * |
| 1716 | * When modifying memslots outside of the slots_lock, must be held |
| 1717 | * before reading the pointer to the current memslots until after all |
| 1718 | * changes to those memslots are complete. |
| 1719 | * |
| 1720 | * These rules ensure that installing new memslots does not lose |
| 1721 | * changes made to the previous memslots. |
| 1722 | */ |
| 1723 | mutex_lock(&kvm->slots_arch_lock); |
| 1724 | |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1725 | /* |
| 1726 | * Invalidate the old slot if it's being deleted or moved. This is |
| 1727 | * done prior to actually deleting/moving the memslot to allow vCPUs to |
| 1728 | * continue running by ensuring there are no mappings or shadow pages |
| 1729 | * for the memslot when it is deleted/moved. Without pre-invalidation |
| 1730 | * (and without a lock), a window would exist between effecting the |
| 1731 | * delete/move and committing the changes in arch code where KVM or a |
| 1732 | * guest could access a non-existent memslot. |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1733 | * |
| 1734 | * Modifications are done on a temporary, unreachable slot. The old |
| 1735 | * slot needs to be preserved in case a later step fails and the |
| 1736 | * invalidation needs to be reverted. |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1737 | */ |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1738 | if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { |
| 1739 | invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT); |
| 1740 | if (!invalid_slot) { |
| 1741 | mutex_unlock(&kvm->slots_arch_lock); |
| 1742 | return -ENOMEM; |
| 1743 | } |
| 1744 | kvm_invalidate_memslot(kvm, old, invalid_slot); |
Ben Gardon | b10a038 | 2021-05-18 10:34:11 -0700 | [diff] [blame] | 1745 | } |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1746 | |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1747 | r = kvm_prepare_memory_region(kvm, old, new, change); |
| 1748 | if (r) { |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1749 | /* |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1750 | * For DELETE/MOVE, revert the above INVALID change. No |
| 1751 | * modifications required since the original slot was preserved |
| 1752 | * in the inactive slots. Changing the active memslots also |
 | 1753 |  * releases slots_arch_lock. 
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1754 | */ |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1755 | if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { |
| 1756 | kvm_activate_memslot(kvm, invalid_slot, old); |
| 1757 | kfree(invalid_slot); |
| 1758 | } else { |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1759 | mutex_unlock(&kvm->slots_arch_lock); |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1760 | } |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1761 | return r; |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1762 | } |
| 1763 | |
Sean Christopherson | bda44d8 | 2021-11-04 00:25:02 +0000 | [diff] [blame] | 1764 | /* |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1765 |  * For DELETE and MOVE, the temporary INVALID slot allocated above is now 
 | 1766 |  * active in place of the old slot, while the old slot itself is still 
 | 1767 |  * linked in the inactive set. The helpers below detach and/or free the 
 | 1768 |  * old slot as appropriate; for CREATE, there is no old slot, and for 
 | 1769 |  * FLAGS_ONLY, the old slot is simply replaced with the updated copy. 
Sean Christopherson | bda44d8 | 2021-11-04 00:25:02 +0000 | [diff] [blame] | 1770 | */ |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1771 | if (change == KVM_MR_CREATE) |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1772 | kvm_create_memslot(kvm, new); |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1773 | else if (change == KVM_MR_DELETE) |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1774 | kvm_delete_memslot(kvm, old, invalid_slot); |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1775 | else if (change == KVM_MR_MOVE) |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1776 | kvm_move_memslot(kvm, old, new, invalid_slot); |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1777 | else if (change == KVM_MR_FLAGS_ONLY) |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1778 | kvm_update_flags_memslot(kvm, old, new); |
Maciej S. Szmigiero | 4e4d30c | 2021-12-06 20:54:09 +0100 | [diff] [blame] | 1779 | else |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1780 | BUG(); |
Sean Christopherson | bda44d8 | 2021-11-04 00:25:02 +0000 | [diff] [blame] | 1781 | |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1782 | /* Free the temporary INVALID slot used for DELETE and MOVE. */ |
| 1783 | if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) |
| 1784 | kfree(invalid_slot); |
Sean Christopherson | bda44d8 | 2021-11-04 00:25:02 +0000 | [diff] [blame] | 1785 | |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 1786 | /* |
| 1787 | * No need to refresh new->arch, changes after dropping slots_arch_lock |
| 1788 | * will directly hit the final, active memslot. Architectures are |
| 1789 | * responsible for knowing that new->arch may be stale. |
| 1790 | */ |
| 1791 | kvm_commit_memory_region(kvm, old, new, change); |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1792 | |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1793 | return 0; |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1794 | } |
| 1795 | |
Maciej S. Szmigiero | 44401a2 | 2021-12-06 20:54:33 +0100 | [diff] [blame] | 1796 | static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id, |
| 1797 | gfn_t start, gfn_t end) |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1798 | { |
Maciej S. Szmigiero | 44401a2 | 2021-12-06 20:54:33 +0100 | [diff] [blame] | 1799 | struct kvm_memslot_iter iter; |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1800 | |
Maciej S. Szmigiero | 44401a2 | 2021-12-06 20:54:33 +0100 | [diff] [blame] | 1801 | kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) { |
| 1802 | if (iter.slot->id != id) |
| 1803 | return true; |
| 1804 | } |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1805 | |
Maciej S. Szmigiero | 44401a2 | 2021-12-06 20:54:33 +0100 | [diff] [blame] | 1806 | return false; |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1807 | } |
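
/*
 * Illustrative sketch (not part of KVM): the conflict rule the gfn-tree
 * walk above implements.  A CREATE or MOVE request for the half-open
 * range [start, end) conflicts iff some slot with a different id
 * intersects it; since the iterator only visits intersecting slots,
 * any hit with a different id is an overlap.
 * example_gfn_ranges_overlap() is a hypothetical helper name.
 */
static inline bool example_gfn_ranges_overlap(gfn_t a_start, gfn_t a_end,
					      gfn_t b_start, gfn_t b_end)
{
	/* Half-open intervals: [a_start, a_end) vs [b_start, b_end). */
	return a_start < b_end && b_start < a_end;
}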
| 1808 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1809 | /* |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1810 | * Allocate some memory and give it an address in the guest physical address |
| 1811 | * space. |
| 1812 | * |
| 1813 | * Discontiguous memory is allowed, mostly for framebuffers. |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1814 | * |
Dominik Dingel | 02d5d55 | 2014-10-27 16:22:56 +0100 | [diff] [blame] | 1815 | * Must be called holding kvm->slots_lock for write. |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1816 | */ |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1817 | int __kvm_set_memory_region(struct kvm *kvm, |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1818 | const struct kvm_userspace_memory_region *mem) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1819 | { |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1820 | struct kvm_memory_slot *old, *new; |
Maciej S. Szmigiero | 44401a2 | 2021-12-06 20:54:33 +0100 | [diff] [blame] | 1821 | struct kvm_memslots *slots; |
Takuya Yoshikawa | f64c039 | 2013-01-29 11:00:07 +0900 | [diff] [blame] | 1822 | enum kvm_mr_change change; |
Sean Christopherson | 0f9bdef | 2021-12-06 20:54:34 +0100 | [diff] [blame] | 1823 | unsigned long npages; |
| 1824 | gfn_t base_gfn; |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1825 | int as_id, id; |
| 1826 | int r; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1827 | |
Xiao Guangrong | a50d64d | 2012-08-21 10:58:13 +0800 | [diff] [blame] | 1828 | r = check_memory_region_flags(mem); |
| 1829 | if (r) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1830 | return r; |
Xiao Guangrong | a50d64d | 2012-08-21 10:58:13 +0800 | [diff] [blame] | 1831 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1832 | as_id = mem->slot >> 16; |
| 1833 | id = (u16)mem->slot; |
| 1834 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1835 | /* General sanity checks */ |
Sean Christopherson | 6b285a5 | 2021-11-04 00:25:03 +0000 | [diff] [blame] | 1836 | if ((mem->memory_size & (PAGE_SIZE - 1)) || |
| 1837 | (mem->memory_size != (unsigned long)mem->memory_size)) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1838 | return -EINVAL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1839 | if (mem->guest_phys_addr & (PAGE_SIZE - 1)) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1840 | return -EINVAL; |
Takuya Yoshikawa | fa3d315 | 2011-05-07 16:35:38 +0900 | [diff] [blame] | 1841 | /* We can read the guest memory with __xxx_user() later on. */ |
Paolo Bonzini | 09d952c | 2020-06-01 04:17:45 -0400 | [diff] [blame] | 1842 | if ((mem->userspace_addr & (PAGE_SIZE - 1)) || |
Marc Zyngier | 139bc8a | 2021-01-21 12:08:15 +0000 | [diff] [blame] | 1843 | (mem->userspace_addr != untagged_addr(mem->userspace_addr)) || |
Linus Torvalds | 96d4f26 | 2019-01-03 18:57:57 -0800 | [diff] [blame] | 1844 | !access_ok((void __user *)(unsigned long)mem->userspace_addr, |
Paolo Bonzini | 09d952c | 2020-06-01 04:17:45 -0400 | [diff] [blame] | 1845 | mem->memory_size)) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1846 | return -EINVAL; |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1847 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1848 | return -EINVAL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1849 | if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1850 | return -EINVAL; |
Sean Christopherson | 0f9bdef | 2021-12-06 20:54:34 +0100 | [diff] [blame] | 1851 | if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES) |
Sean Christopherson | 163da37 | 2020-02-18 13:07:28 -0800 | [diff] [blame] | 1852 | return -EINVAL; |
| 1853 | |
Maciej S. Szmigiero | 44401a2 | 2021-12-06 20:54:33 +0100 | [diff] [blame] | 1854 | slots = __kvm_memslots(kvm, as_id); |
| 1855 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1856 | /* |
Sean Christopherson | 7cd0855 | 2021-12-06 20:54:22 +0100 | [diff] [blame] | 1857 | * Note, the old memslot (and the pointer itself!) may be invalidated |
| 1858 | * and/or destroyed by kvm_set_memslot(). |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1859 | */ |
Maciej S. Szmigiero | 44401a2 | 2021-12-06 20:54:33 +0100 | [diff] [blame] | 1860 | old = id_to_memslot(slots, id); |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1861 | |
Sean Christopherson | 47ea7d9 | 2021-12-06 20:54:08 +0100 | [diff] [blame] | 1862 | if (!mem->memory_size) { |
Sean Christopherson | 7cd0855 | 2021-12-06 20:54:22 +0100 | [diff] [blame] | 1863 | if (!old || !old->npages) |
Sean Christopherson | 71a4c30 | 2020-02-18 13:07:22 -0800 | [diff] [blame] | 1864 | return -EINVAL; |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1865 | |
Sean Christopherson | 7cd0855 | 2021-12-06 20:54:22 +0100 | [diff] [blame] | 1866 | if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) |
Sean Christopherson | 47ea7d9 | 2021-12-06 20:54:08 +0100 | [diff] [blame] | 1867 | return -EIO; |
| 1868 | |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1869 | return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE); |
Sean Christopherson | 47ea7d9 | 2021-12-06 20:54:08 +0100 | [diff] [blame] | 1870 | } |
Takuya Yoshikawa | 75d61fb | 2013-01-30 19:40:41 +0900 | [diff] [blame] | 1871 | |
Sean Christopherson | 0f9bdef | 2021-12-06 20:54:34 +0100 | [diff] [blame] | 1872 | base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); |
| 1873 | npages = (mem->memory_size >> PAGE_SHIFT); |
Takuya Yoshikawa | f64c039 | 2013-01-29 11:00:07 +0900 | [diff] [blame] | 1874 | |
Sean Christopherson | 7cd0855 | 2021-12-06 20:54:22 +0100 | [diff] [blame] | 1875 | if (!old || !old->npages) { |
Takuya Yoshikawa | f64c039 | 2013-01-29 11:00:07 +0900 | [diff] [blame] | 1876 | change = KVM_MR_CREATE; |
Sean Christopherson | afa319a | 2021-12-06 20:54:07 +0100 | [diff] [blame] | 1877 | |
| 1878 | /* |
| 1879 | * To simplify KVM internals, the total number of pages across |
| 1880 | * all memslots must fit in an unsigned long. |
| 1881 | */ |
Sean Christopherson | 0f9bdef | 2021-12-06 20:54:34 +0100 | [diff] [blame] | 1882 | if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages) |
Sean Christopherson | afa319a | 2021-12-06 20:54:07 +0100 | [diff] [blame] | 1883 | return -EINVAL; |
Takuya Yoshikawa | f64c039 | 2013-01-29 11:00:07 +0900 | [diff] [blame] | 1884 | } else { /* Modify an existing slot. */ |
Sean Christopherson | 0f9bdef | 2021-12-06 20:54:34 +0100 | [diff] [blame] | 1885 | if ((mem->userspace_addr != old->userspace_addr) || |
| 1886 | (npages != old->npages) || |
| 1887 | ((mem->flags ^ old->flags) & KVM_MEM_READONLY)) |
Takuya Yoshikawa | f64c039 | 2013-01-29 11:00:07 +0900 | [diff] [blame] | 1888 | return -EINVAL; |
| 1889 | |
Sean Christopherson | 0f9bdef | 2021-12-06 20:54:34 +0100 | [diff] [blame] | 1890 | if (base_gfn != old->base_gfn) |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1891 | change = KVM_MR_MOVE; |
Sean Christopherson | 0f9bdef | 2021-12-06 20:54:34 +0100 | [diff] [blame] | 1892 | else if (mem->flags != old->flags) |
Sean Christopherson | 5c0b4f3 | 2020-02-18 13:07:26 -0800 | [diff] [blame] | 1893 | change = KVM_MR_FLAGS_ONLY; |
| 1894 | else /* Nothing to change. */ |
| 1895 | return 0; |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1896 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1897 | |
Maciej S. Szmigiero | 44401a2 | 2021-12-06 20:54:33 +0100 | [diff] [blame] | 1898 | if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) && |
Sean Christopherson | 0f9bdef | 2021-12-06 20:54:34 +0100 | [diff] [blame] | 1899 | kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) |
Maciej S. Szmigiero | 44401a2 | 2021-12-06 20:54:33 +0100 | [diff] [blame] | 1900 | return -EEXIST; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1901 | |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1902 | /* Allocate a slot that will persist in the memslots. */ |
| 1903 | new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT); |
| 1904 | if (!new) |
| 1905 | return -ENOMEM; |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 1906 | |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1907 | new->as_id = as_id; |
| 1908 | new->id = id; |
| 1909 | new->base_gfn = base_gfn; |
| 1910 | new->npages = npages; |
| 1911 | new->flags = mem->flags; |
| 1912 | new->userspace_addr = mem->userspace_addr; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1913 | |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1914 | r = kvm_set_memslot(kvm, old, new, change); |
Sean Christopherson | cf47f50 | 2020-02-18 13:07:23 -0800 | [diff] [blame] | 1915 | if (r) |
Sean Christopherson | 244893f | 2021-12-06 20:54:35 +0100 | [diff] [blame] | 1916 | kfree(new); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1917 | return r; |
Izik Eidus | 210c7c4 | 2007-10-24 23:52:57 +0200 | [diff] [blame] | 1918 | } |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1919 | EXPORT_SYMBOL_GPL(__kvm_set_memory_region); |
| 1920 | |
| 1921 | int kvm_set_memory_region(struct kvm *kvm, |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1922 | const struct kvm_userspace_memory_region *mem) |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1923 | { |
| 1924 | int r; |
| 1925 | |
Marcelo Tosatti | 79fac95 | 2009-12-23 14:35:26 -0200 | [diff] [blame] | 1926 | mutex_lock(&kvm->slots_lock); |
Takuya Yoshikawa | 47ae31e | 2013-02-27 19:43:00 +0900 | [diff] [blame] | 1927 | r = __kvm_set_memory_region(kvm, mem); |
Marcelo Tosatti | 79fac95 | 2009-12-23 14:35:26 -0200 | [diff] [blame] | 1928 | mutex_unlock(&kvm->slots_lock); |
Sheng Yang | f78e0e2 | 2007-10-29 09:40:42 +0800 | [diff] [blame] | 1929 | return r; |
| 1930 | } |
Izik Eidus | 210c7c4 | 2007-10-24 23:52:57 +0200 | [diff] [blame] | 1931 | EXPORT_SYMBOL_GPL(kvm_set_memory_region); |
| 1932 | |
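/*
 * Usage sketch (userspace side, illustrative only): registering guest
 * memory with the KVM_SET_USER_MEMORY_REGION ioctl that lands in
 * kvm_vm_ioctl_set_memory_region() below.  The slot field packs the
 * address space id into bits [31:16] and the slot id into bits [15:0],
 * matching the as_id/id split decoded in __kvm_set_memory_region().
 * set_guest_ram() is a hypothetical helper; error handling is trimmed.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static int set_guest_ram(int vm_fd, __u16 as_id, __u16 id,
			 __u64 gpa, __u64 size)
{
	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct kvm_userspace_memory_region region = {
		.slot = ((__u32)as_id << 16) | id,
		.flags = 0,			/* or KVM_MEM_LOG_DIRTY_PAGES */
		.guest_phys_addr = gpa,		/* must be page aligned */
		.memory_size = size,		/* 0 would delete the slot */
		.userspace_addr = (__u64)(unsigned long)mem, /* page aligned */
	};

	if (mem == MAP_FAILED)
		return -1;
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
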
Stephen Hemminger | 7940876 | 2013-12-29 12:12:29 -0800 | [diff] [blame] | 1933 | static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, |
| 1934 | struct kvm_userspace_memory_region *mem) |
Izik Eidus | 210c7c4 | 2007-10-24 23:52:57 +0200 | [diff] [blame] | 1935 | { |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1936 | if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) |
Izik Eidus | e0d62c7 | 2007-10-24 23:57:46 +0200 | [diff] [blame] | 1937 | return -EINVAL; |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 1938 | |
Takuya Yoshikawa | 47ae31e | 2013-02-27 19:43:00 +0900 | [diff] [blame] | 1939 | return kvm_set_memory_region(kvm, mem); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1940 | } |
| 1941 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1942 | #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1943 | /** |
| 1944 | * kvm_get_dirty_log - get a snapshot of dirty pages |
| 1945 | * @kvm: pointer to kvm instance |
| 1946 | * @log: slot id and address to which we copy the log |
| 1947 | * @is_dirty: set to '1' if any dirty pages were found |
| 1948 | * @memslot: set to the associated memslot, always valid on success |
| 1949 | */ |
| 1950 | int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, |
| 1951 | int *is_dirty, struct kvm_memory_slot **memslot) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1952 | { |
Paolo Bonzini | 9f6b802 | 2015-05-17 16:20:07 +0200 | [diff] [blame] | 1953 | struct kvm_memslots *slots; |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1954 | int i, as_id, id; |
Takuya Yoshikawa | 87bf6e7 | 2010-04-12 19:35:35 +0900 | [diff] [blame] | 1955 | unsigned long n; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1956 | unsigned long any = 0; |
| 1957 | |
Peter Xu | b2cc64c | 2020-09-30 21:22:24 -0400 | [diff] [blame] | 1958 | /* Dirty ring tracking is exclusive to dirty log tracking */ |
| 1959 | if (kvm->dirty_ring_size) |
| 1960 | return -ENXIO; |
| 1961 | |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1962 | *memslot = NULL; |
| 1963 | *is_dirty = 0; |
| 1964 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1965 | as_id = log->slot >> 16; |
| 1966 | id = (u16)log->slot; |
| 1967 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1968 | return -EINVAL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1969 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 1970 | slots = __kvm_memslots(kvm, as_id); |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1971 | *memslot = id_to_memslot(slots, id); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1972 | if (!(*memslot) || !(*memslot)->dirty_bitmap) |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1973 | return -ENOENT; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1974 | |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1975 | kvm_arch_sync_dirty_log(kvm, *memslot); |
| 1976 | |
| 1977 | n = kvm_dirty_bitmap_bytes(*memslot); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1978 | |
Uri Lublin | cd1a4a9 | 2007-02-22 16:43:09 +0200 | [diff] [blame] | 1979 | for (i = 0; !any && i < n/sizeof(long); ++i) |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1980 | any = (*memslot)->dirty_bitmap[i]; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1981 | |
Sean Christopherson | 2a49f61 | 2020-02-18 13:07:30 -0800 | [diff] [blame] | 1982 | if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n)) |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1983 | return -EFAULT; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1984 | |
Zhang Xiantao | 5bb064d | 2007-11-18 20:29:43 +0800 | [diff] [blame] | 1985 | if (any) |
| 1986 | *is_dirty = 1; |
Markus Elfring | 843574a | 2017-01-22 17:41:07 +0100 | [diff] [blame] | 1987 | return 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1988 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 1989 | EXPORT_SYMBOL_GPL(kvm_get_dirty_log); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1990 | |
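/*
 * Sizing sketch: kvm_dirty_bitmap_bytes() (kvm_host.h) computes n above
 * as one bit per page in the slot, rounded up to a whole number of
 * longs.  The equivalent expression is shown below for reference;
 * example_dirty_bitmap_bytes() is a hypothetical name.
 */
static inline unsigned long example_dirty_bitmap_bytes(unsigned long npages)
{
	return ALIGN(npages, BITS_PER_LONG) / 8;	/* bits -> bytes */
}
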
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 1991 | #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1992 | /** |
Jiang Biao | b8b0022 | 2019-04-23 19:40:30 +0800 | [diff] [blame] | 1993 | * kvm_get_dirty_log_protect - get a snapshot of dirty pages |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 1994 | * and reenable dirty page tracking for the corresponding pages. |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1995 | * @kvm: pointer to kvm instance |
| 1996 | * @log: slot id and address to which we copy the log |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 1997 | * |
| 1998 | * Keep in mind that VCPU threads can write to the bitmap |
| 1999 | * concurrently. To avoid losing track of dirty pages, we keep the |
| 2000 | * following order: |
| 2001 | * |
| 2002 | * 1. Take a snapshot of the bit and clear it if needed. |
| 2003 | * 2. Write protect the corresponding page. |
| 2004 | * 3. Copy the snapshot to the userspace. |
| 2005 | * 4. Upon return, the caller flushes TLBs if needed. |
| 2006 | * |
| 2007 | * Between 2 and 4, the guest may write to the page using the remaining TLB |
| 2008 | * entry. This is not a problem because the page is reported dirty using |
| 2009 | * the snapshot taken before, and step 4 ensures that writes done after |
| 2010 | * exiting to userspace will be logged for the next call. |
| 2011 | * |
| 2012 | */ |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2013 | static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2014 | { |
Paolo Bonzini | 9f6b802 | 2015-05-17 16:20:07 +0200 | [diff] [blame] | 2015 | struct kvm_memslots *slots; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2016 | struct kvm_memory_slot *memslot; |
Markus Elfring | 58d6db3 | 2017-01-22 17:30:16 +0100 | [diff] [blame] | 2017 | int i, as_id, id; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2018 | unsigned long n; |
| 2019 | unsigned long *dirty_bitmap; |
| 2020 | unsigned long *dirty_bitmap_buffer; |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2021 | bool flush; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2022 | |
Peter Xu | b2cc64c | 2020-09-30 21:22:24 -0400 | [diff] [blame] | 2023 | /* Dirty ring tracking is exclusive to dirty log tracking */ |
| 2024 | if (kvm->dirty_ring_size) |
| 2025 | return -ENXIO; |
| 2026 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 2027 | as_id = log->slot >> 16; |
| 2028 | id = (u16)log->slot; |
| 2029 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) |
Markus Elfring | 58d6db3 | 2017-01-22 17:30:16 +0100 | [diff] [blame] | 2030 | return -EINVAL; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2031 | |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 2032 | slots = __kvm_memslots(kvm, as_id); |
| 2033 | memslot = id_to_memslot(slots, id); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 2034 | if (!memslot || !memslot->dirty_bitmap) |
| 2035 | return -ENOENT; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2036 | |
| 2037 | dirty_bitmap = memslot->dirty_bitmap; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2038 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2039 | kvm_arch_sync_dirty_log(kvm, memslot); |
| 2040 | |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2041 | n = kvm_dirty_bitmap_bytes(memslot); |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2042 | flush = false; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2043 | if (kvm->manual_dirty_log_protect) { |
| 2044 | /* |
| 2045 | * Unlike kvm_get_dirty_log, we always leave flush set to false |
| 2046 | * here because no flush is needed until KVM_CLEAR_DIRTY_LOG. There |
| 2047 | * is some code duplication between this function and |
| 2048 | * kvm_get_dirty_log, but hopefully once all architectures |
| 2049 | * transition to kvm_get_dirty_log_protect, kvm_get_dirty_log |
| 2050 | * can be eliminated. |
| 2051 | */ |
| 2052 | dirty_bitmap_buffer = dirty_bitmap; |
| 2053 | } else { |
| 2054 | dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); |
| 2055 | memset(dirty_bitmap_buffer, 0, n); |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2056 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 2057 | KVM_MMU_LOCK(kvm); |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2058 | for (i = 0; i < n / sizeof(long); i++) { |
| 2059 | unsigned long mask; |
| 2060 | gfn_t offset; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2061 | |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2062 | if (!dirty_bitmap[i]) |
| 2063 | continue; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2064 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2065 | flush = true; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2066 | mask = xchg(&dirty_bitmap[i], 0); |
| 2067 | dirty_bitmap_buffer[i] = mask; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2068 | |
Lan Tianyu | a67794c | 2019-02-02 17:20:27 +0800 | [diff] [blame] | 2069 | offset = i * BITS_PER_LONG; |
| 2070 | kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, |
| 2071 | offset, mask); |
Takuya Yoshikawa | 58d2930 | 2015-03-17 16:19:58 +0900 | [diff] [blame] | 2072 | } |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 2073 | KVM_MMU_UNLOCK(kvm); |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2074 | } |
| 2075 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2076 | if (flush) |
| 2077 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
| 2078 | |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2079 | if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) |
Markus Elfring | 58d6db3 | 2017-01-22 17:30:16 +0100 | [diff] [blame] | 2080 | return -EFAULT; |
| 2081 | return 0; |
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2082 | } |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2083 | |
| 2084 | |
| 2085 | /** |
| 2086 | * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot |
| 2087 | * @kvm: kvm instance |
| 2088 | * @log: slot id and address to which we copy the log |
| 2089 | * |
| 2090 | * Steps 1-4 below provide general overview of dirty page logging. See |
| 2091 | * kvm_get_dirty_log_protect() function description for additional details. |
| 2092 | * |
| 2093 | * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we |
| 2094 | * always flush the TLB (step 4) even if a previous step failed and the dirty |
| 2095 | * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging |
| 2096 | * API does not preclude subsequent dirty log reads by user space. Flushing the |
| 2097 | * TLB ensures writes will be marked dirty for the next log read. |
| 2098 | * |
| 2099 | * 1. Take a snapshot of the bit and clear it if needed. |
| 2100 | * 2. Write protect the corresponding page. |
| 2101 | * 3. Copy the snapshot to the userspace. |
| 2102 | * 4. Flush TLBs if needed. |
| 2103 | */ |
| 2104 | static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, |
| 2105 | struct kvm_dirty_log *log) |
| 2106 | { |
| 2107 | int r; |
| 2108 | |
| 2109 | mutex_lock(&kvm->slots_lock); |
| 2110 | |
| 2111 | r = kvm_get_dirty_log_protect(kvm, log); |
| 2112 | |
| 2113 | mutex_unlock(&kvm->slots_lock); |
| 2114 | return r; |
| 2115 | } |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2116 | |
| 2117 | /** |
| 2118 | * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap |
| 2119 | * and reenable dirty page tracking for the corresponding pages. |
| 2120 | * @kvm: pointer to kvm instance |
| 2121 | * @log: slot id and address from which to fetch the bitmap of dirty pages |
| 2122 | */ |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2123 | static int kvm_clear_dirty_log_protect(struct kvm *kvm, |
| 2124 | struct kvm_clear_dirty_log *log) |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2125 | { |
| 2126 | struct kvm_memslots *slots; |
| 2127 | struct kvm_memory_slot *memslot; |
Tomas Bortoli | 98938aa | 2019-01-02 18:29:37 +0100 | [diff] [blame] | 2128 | int as_id, id; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2129 | gfn_t offset; |
Tomas Bortoli | 98938aa | 2019-01-02 18:29:37 +0100 | [diff] [blame] | 2130 | unsigned long i, n; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2131 | unsigned long *dirty_bitmap; |
| 2132 | unsigned long *dirty_bitmap_buffer; |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2133 | bool flush; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2134 | |
Peter Xu | b2cc64c | 2020-09-30 21:22:24 -0400 | [diff] [blame] | 2135 | /* Dirty ring tracking is exclusive to dirty log tracking */ |
| 2136 | if (kvm->dirty_ring_size) |
| 2137 | return -ENXIO; |
| 2138 | |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2139 | as_id = log->slot >> 16; |
| 2140 | id = (u16)log->slot; |
| 2141 | if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) |
| 2142 | return -EINVAL; |
| 2143 | |
Paolo Bonzini | 76d58e0 | 2019-04-17 15:28:44 +0200 | [diff] [blame] | 2144 | if (log->first_page & 63) |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2145 | return -EINVAL; |
| 2146 | |
| 2147 | slots = __kvm_memslots(kvm, as_id); |
| 2148 | memslot = id_to_memslot(slots, id); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 2149 | if (!memslot || !memslot->dirty_bitmap) |
| 2150 | return -ENOENT; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2151 | |
| 2152 | dirty_bitmap = memslot->dirty_bitmap; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2153 | |
Peter Xu | 4ddc920 | 2019-05-08 17:15:45 +0800 | [diff] [blame] | 2154 | n = ALIGN(log->num_pages, BITS_PER_LONG) / 8; |
Tomas Bortoli | 98938aa | 2019-01-02 18:29:37 +0100 | [diff] [blame] | 2155 | |
| 2156 | if (log->first_page > memslot->npages || |
Paolo Bonzini | 76d58e0 | 2019-04-17 15:28:44 +0200 | [diff] [blame] | 2157 | log->num_pages > memslot->npages - log->first_page || |
| 2158 | (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) |
| 2159 | return -EINVAL; |
Tomas Bortoli | 98938aa | 2019-01-02 18:29:37 +0100 | [diff] [blame] | 2160 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2161 | kvm_arch_sync_dirty_log(kvm, memslot); |
| 2162 | |
| 2163 | flush = false; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2164 | dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); |
| 2165 | if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) |
| 2166 | return -EFAULT; |
| 2167 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 2168 | KVM_MMU_LOCK(kvm); |
Peter Xu | 53eac7a | 2019-05-08 17:15:46 +0800 | [diff] [blame] | 2169 | for (offset = log->first_page, i = offset / BITS_PER_LONG, |
| 2170 | n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2171 | i++, offset += BITS_PER_LONG) { |
| 2172 | unsigned long mask = *dirty_bitmap_buffer++; |
| 2173 | atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i]; |
| 2174 | if (!mask) |
| 2175 | continue; |
| 2176 | |
| 2177 | mask &= atomic_long_fetch_andnot(mask, p); |
| 2178 | |
| 2179 | /* |
| 2180 | * mask contains the bits that really have been cleared. This |
| 2181 | * never includes any bits beyond the length of the memslot (if |
| 2182 | * the length is not aligned to 64 pages), therefore it is not |
| 2183 | * a problem if userspace sets them in log->dirty_bitmap. |
| 2184 | */ |
| 2185 | if (mask) { |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2186 | flush = true; |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2187 | kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, |
| 2188 | offset, mask); |
| 2189 | } |
| 2190 | } |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 2191 | KVM_MMU_UNLOCK(kvm); |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2192 | |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2193 | if (flush) |
| 2194 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
| 2195 | |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 2196 | return 0; |
| 2197 | } |
Sean Christopherson | 0dff084 | 2020-02-18 13:07:29 -0800 | [diff] [blame] | 2198 | |
| 2199 | static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, |
| 2200 | struct kvm_clear_dirty_log *log) |
| 2201 | { |
| 2202 | int r; |
| 2203 | |
| 2204 | mutex_lock(&kvm->slots_lock); |
| 2205 | |
| 2206 | r = kvm_clear_dirty_log_protect(kvm, log); |
| 2207 | |
| 2208 | mutex_unlock(&kvm->slots_lock); |
| 2209 | return r; |
| 2210 | } |
| 2211 | #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ |
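
/*
 * Usage sketch (userspace side, illustrative only): harvesting and then
 * clearing the dirty log for one slot via the two ioctls above.  The
 * bitmap carries one bit per page, padded to 64-bit words.  Assumes the
 * slot was created with KVM_MEM_LOG_DIRTY_PAGES and that
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 was enabled for the clear path;
 * harvest_dirty_log() is a hypothetical helper.
 */
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static int harvest_dirty_log(int vm_fd, __u32 slot, __u32 npages)
{
	size_t len = ((npages + 63) / 64) * 8;	/* one bit per page */
	__u64 *bitmap = calloc(1, len);
	struct kvm_dirty_log get = {
		.slot = slot,
		.dirty_bitmap = bitmap,
	};
	struct kvm_clear_dirty_log clear = {
		.slot = slot,
		.first_page = 0,	/* must be 64-page aligned */
		.num_pages = npages,	/* multiple of 64 unless it hits slot end */
		.dirty_bitmap = bitmap,
	};
	int r;

	if (!bitmap)
		return -1;
	r = ioctl(vm_fd, KVM_GET_DIRTY_LOG, &get);
	if (!r)
		r = ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
	free(bitmap);
	return r;
}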
Mario Smarduch | ba0513b | 2015-01-15 15:58:53 -0800 | [diff] [blame] | 2212 | |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2213 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) |
| 2214 | { |
| 2215 | return __gfn_to_memslot(kvm_memslots(kvm), gfn); |
| 2216 | } |
Avi Kivity | a1f4d395 | 2010-06-21 11:44:20 +0300 | [diff] [blame] | 2217 | EXPORT_SYMBOL_GPL(gfn_to_memslot); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2218 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2219 | struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 2220 | { |
David Matlack | fe22ed8 | 2021-08-04 22:28:40 +0000 | [diff] [blame] | 2221 | struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 2222 | u64 gen = slots->generation; |
David Matlack | fe22ed8 | 2021-08-04 22:28:40 +0000 | [diff] [blame] | 2223 | struct kvm_memory_slot *slot; |
David Matlack | fe22ed8 | 2021-08-04 22:28:40 +0000 | [diff] [blame] | 2224 | |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 2225 | /* |
| 2226 | * This also protects against using a memslot from a different address space, |
| 2227 | * since different address spaces have different generation numbers. |
| 2228 | */ |
| 2229 | if (unlikely(gen != vcpu->last_used_slot_gen)) { |
| 2230 | vcpu->last_used_slot = NULL; |
| 2231 | vcpu->last_used_slot_gen = gen; |
| 2232 | } |
| 2233 | |
| 2234 | slot = try_get_memslot(vcpu->last_used_slot, gfn); |
David Matlack | fe22ed8 | 2021-08-04 22:28:40 +0000 | [diff] [blame] | 2235 | if (slot) |
| 2236 | return slot; |
| 2237 | |
| 2238 | /* |
| 2239 | * Fall back to searching all memslots. We purposely use |
| 2240 | * search_memslots() instead of __gfn_to_memslot() to avoid |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 2241 | * thrashing the VM-wide last_used_slot in kvm_memslots. |
David Matlack | fe22ed8 | 2021-08-04 22:28:40 +0000 | [diff] [blame] | 2242 | */ |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 2243 | slot = search_memslots(slots, gfn, false); |
David Matlack | fe22ed8 | 2021-08-04 22:28:40 +0000 | [diff] [blame] | 2244 | if (slot) { |
Maciej S. Szmigiero | a54d806 | 2021-12-06 20:54:30 +0100 | [diff] [blame] | 2245 | vcpu->last_used_slot = slot; |
David Matlack | fe22ed8 | 2021-08-04 22:28:40 +0000 | [diff] [blame] | 2246 | return slot; |
| 2247 | } |
| 2248 | |
| 2249 | return NULL; |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2250 | } |
| 2251 | |
Yaowei Bai | 33e9415 | 2015-11-14 11:21:06 +0800 | [diff] [blame] | 2252 | bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) |
Izik Eidus | e0d62c7 | 2007-10-24 23:57:46 +0200 | [diff] [blame] | 2253 | { |
Xiao Guangrong | bf3e05b | 2011-11-24 17:40:57 +0800 | [diff] [blame] | 2254 | struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); |
Izik Eidus | e0d62c7 | 2007-10-24 23:57:46 +0200 | [diff] [blame] | 2255 | |
Paolo Bonzini | c36b715 | 2020-04-16 09:48:07 -0400 | [diff] [blame] | 2256 | return kvm_is_visible_memslot(memslot); |
Izik Eidus | e0d62c7 | 2007-10-24 23:57:46 +0200 | [diff] [blame] | 2257 | } |
| 2258 | EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); |
| 2259 | |
Vitaly Kuznetsov | 995decb | 2020-07-08 16:00:23 +0200 | [diff] [blame] | 2260 | bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 2261 | { |
| 2262 | struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 2263 | |
| 2264 | return kvm_is_visible_memslot(memslot); |
| 2265 | } |
| 2266 | EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); |
| 2267 | |
Sean Christopherson | f9b84e1 | 2020-01-08 12:24:37 -0800 | [diff] [blame] | 2268 | unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) |
Joerg Roedel | 8f0b1ab | 2010-01-28 12:37:56 +0100 | [diff] [blame] | 2269 | { |
| 2270 | struct vm_area_struct *vma; |
| 2271 | unsigned long addr, size; |
| 2272 | |
| 2273 | size = PAGE_SIZE; |
| 2274 | |
Sean Christopherson | 42cde48 | 2020-01-08 12:24:38 -0800 | [diff] [blame] | 2275 | addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); |
Joerg Roedel | 8f0b1ab | 2010-01-28 12:37:56 +0100 | [diff] [blame] | 2276 | if (kvm_is_error_hva(addr)) |
| 2277 | return PAGE_SIZE; |
| 2278 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2279 | mmap_read_lock(current->mm); |
Joerg Roedel | 8f0b1ab | 2010-01-28 12:37:56 +0100 | [diff] [blame] | 2280 | vma = find_vma(current->mm, addr); |
| 2281 | if (!vma) |
| 2282 | goto out; |
| 2283 | |
| 2284 | size = vma_kernel_pagesize(vma); |
| 2285 | |
| 2286 | out: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2287 | mmap_read_unlock(current->mm); |
Joerg Roedel | 8f0b1ab | 2010-01-28 12:37:56 +0100 | [diff] [blame] | 2288 | |
| 2289 | return size; |
| 2290 | } |
| 2291 | |
Ben Gardon | 8283e36 | 2021-11-15 15:45:58 -0800 | [diff] [blame] | 2292 | static bool memslot_is_readonly(const struct kvm_memory_slot *slot) |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2293 | { |
| 2294 | return slot->flags & KVM_MEM_READONLY; |
| 2295 | } |
| 2296 | |
Ben Gardon | 8283e36 | 2021-11-15 15:45:58 -0800 | [diff] [blame] | 2297 | static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2298 | gfn_t *nr_pages, bool write) |
Izik Eidus | 539cb66 | 2007-11-11 22:05:04 +0200 | [diff] [blame] | 2299 | { |
Marcelo Tosatti | bc6678a | 2009-12-23 14:35:21 -0200 | [diff] [blame] | 2300 | if (!slot || slot->flags & KVM_MEMSLOT_INVALID) |
Xiao Guangrong | ca3a490 | 2012-08-21 11:01:50 +0800 | [diff] [blame] | 2301 | return KVM_HVA_ERR_BAD; |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2302 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2303 | if (memslot_is_readonly(slot) && write) |
| 2304 | return KVM_HVA_ERR_RO_BAD; |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2305 | |
| 2306 | if (nr_pages) |
| 2307 | *nr_pages = slot->npages - (gfn - slot->base_gfn); |
| 2308 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2309 | return __gfn_to_hva_memslot(slot, gfn); |
Izik Eidus | 539cb66 | 2007-11-11 22:05:04 +0200 | [diff] [blame] | 2310 | } |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2311 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2312 | static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2313 | gfn_t *nr_pages) |
| 2314 | { |
| 2315 | return __gfn_to_hva_many(slot, gfn, nr_pages, true); |
| 2316 | } |
| 2317 | |
| 2318 | unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, |
Stephen Hemminger | 7940876 | 2013-12-29 12:12:29 -0800 | [diff] [blame] | 2319 | gfn_t gfn) |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2320 | { |
| 2321 | return gfn_to_hva_many(slot, gfn, NULL); |
| 2322 | } |
| 2323 | EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); |
| 2324 | |
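/*
 * Arithmetic sketch of __gfn_to_hva_memslot() (kvm_host.h), which the
 * helpers above build on: the hva is the slot's userspace base address
 * plus the byte offset of the gfn within the slot.  The real helper
 * additionally applies array_index_nospec() hardening to the offset;
 * example_gfn_to_hva() is a hypothetical name.
 */
static inline unsigned long example_gfn_to_hva(const struct kvm_memory_slot *slot,
					       gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
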
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2325 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) |
| 2326 | { |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 2327 | return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2328 | } |
Sheng Yang | 0d15029 | 2008-04-25 21:44:50 +0800 | [diff] [blame] | 2329 | EXPORT_SYMBOL_GPL(gfn_to_hva); |
Izik Eidus | 539cb66 | 2007-11-11 22:05:04 +0200 | [diff] [blame] | 2330 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2331 | unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 2332 | { |
| 2333 | return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); |
| 2334 | } |
| 2335 | EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); |
| 2336 | |
Xiao Guangrong | 86ab8cf | 2012-08-21 10:59:53 +0800 | [diff] [blame] | 2337 | /* |
Wei Yang | 970c0d4 | 2018-10-09 10:41:15 +0800 | [diff] [blame] | 2338 | * Return the hva of a @gfn and the R/W attribute if possible. |
| 2339 | * |
| 2340 | * @slot: the kvm_memory_slot which contains @gfn |
| 2341 | * @gfn: the gfn to be translated |
| 2342 | * @writable: used to return the read/write attribute of the @slot if the hva |
| 2343 | * is valid and @writable is not NULL |
Xiao Guangrong | 86ab8cf | 2012-08-21 10:59:53 +0800 | [diff] [blame] | 2344 | */ |
Christoffer Dall | 64d8312 | 2014-08-19 12:15:00 +0200 | [diff] [blame] | 2345 | unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, |
| 2346 | gfn_t gfn, bool *writable) |
Gleb Natapov | 8030089 | 2010-10-19 18:13:41 +0200 | [diff] [blame] | 2347 | { |
Gleb Natapov | a2ac07f | 2013-10-01 19:58:36 +0300 | [diff] [blame] | 2348 | unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); |
| 2349 | |
| 2350 | if (!kvm_is_error_hva(hva) && writable) |
Paolo Bonzini | ba6a354 | 2013-09-09 13:52:33 +0200 | [diff] [blame] | 2351 | *writable = !memslot_is_readonly(slot); |
| 2352 | |
Gleb Natapov | a2ac07f | 2013-10-01 19:58:36 +0300 | [diff] [blame] | 2353 | return hva; |
Xiao Guangrong | 86ab8cf | 2012-08-21 10:59:53 +0800 | [diff] [blame] | 2354 | } |
| 2355 | |
Christoffer Dall | 64d8312 | 2014-08-19 12:15:00 +0200 | [diff] [blame] | 2356 | unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) |
| 2357 | { |
| 2358 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); |
| 2359 | |
| 2360 | return gfn_to_hva_memslot_prot(slot, gfn, writable); |
| 2361 | } |
| 2362 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2363 | unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) |
| 2364 | { |
| 2365 | struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 2366 | |
| 2367 | return gfn_to_hva_memslot_prot(slot, gfn, writable); |
| 2368 | } |
| 2369 | |
Huang Ying | fafc3db | 2011-01-30 11:15:49 +0800 | [diff] [blame] | 2370 | static inline int check_user_page_hwpoison(unsigned long addr) |
| 2371 | { |
Lorenzo Stoakes | 0d73175 | 2016-10-24 10:57:25 +0100 | [diff] [blame] | 2372 | int rc, flags = FOLL_HWPOISON | FOLL_WRITE; |
Huang Ying | fafc3db | 2011-01-30 11:15:49 +0800 | [diff] [blame] | 2373 | |
Lorenzo Stoakes | 0d73175 | 2016-10-24 10:57:25 +0100 | [diff] [blame] | 2374 | rc = get_user_pages(addr, 1, flags, NULL, NULL); |
Huang Ying | fafc3db | 2011-01-30 11:15:49 +0800 | [diff] [blame] | 2375 | return rc == -EHWPOISON; |
| 2376 | } |
| 2377 | |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2378 | /* |
Paolo Bonzini | b9b33da | 2018-07-27 17:44:41 +0200 | [diff] [blame] | 2379 | * The fast path to get the writable pfn, which will be stored in @pfn; |
| 2380 | * true indicates success, otherwise false is returned. It's also the |
Miaohe Lin | 311497e | 2019-12-11 14:26:25 +0800 | [diff] [blame] | 2381 | * only part that can run in atomic context. |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2382 | */ |
Paolo Bonzini | b9b33da | 2018-07-27 17:44:41 +0200 | [diff] [blame] | 2383 | static bool hva_to_pfn_fast(unsigned long addr, bool write_fault, |
| 2384 | bool *writable, kvm_pfn_t *pfn) |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2385 | { |
| 2386 | struct page *page[1]; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2387 | |
Xiao Guangrong | 12ce13f | 2012-08-21 11:00:49 +0800 | [diff] [blame] | 2388 | /* |
| 2389 | * Fast pin a writable pfn only if it is a write fault request |
| 2390 | * or the caller allows mapping a writable pfn for a read fault |
| 2391 | * request. |
| 2392 | */ |
| 2393 | if (!(write_fault || writable)) |
| 2394 | return false; |
| 2395 | |
Souptick Joarder | dadbb61 | 2020-06-07 21:40:55 -0700 | [diff] [blame] | 2396 | if (get_user_page_fast_only(addr, FOLL_WRITE, page)) { |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2397 | *pfn = page_to_pfn(page[0]); |
| 2398 | |
| 2399 | if (writable) |
| 2400 | *writable = true; |
| 2401 | return true; |
| 2402 | } |
| 2403 | |
| 2404 | return false; |
| 2405 | } |
| 2406 | |
| 2407 | /* |
| 2408 | * The slow path to get the pfn of the specified host virtual address; |
| 2409 | * 1 indicates success, -errno is returned if an error is detected. |
| 2410 | */ |
| 2411 | static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2412 | bool *writable, kvm_pfn_t *pfn) |
Avi Kivity | 954bbbc | 2007-03-30 14:02:32 +0300 | [diff] [blame] | 2413 | { |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 2414 | unsigned int flags = FOLL_HWPOISON; |
| 2415 | struct page *page; |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2416 | int npages = 0; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2417 | |
| 2418 | might_sleep(); |
| 2419 | |
| 2420 | if (writable) |
| 2421 | *writable = write_fault; |
| 2422 | |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 2423 | if (write_fault) |
| 2424 | flags |= FOLL_WRITE; |
| 2425 | if (async) |
| 2426 | flags |= FOLL_NOWAIT; |
Lorenzo Stoakes | d4944b0 | 2016-10-13 01:20:12 +0100 | [diff] [blame] | 2427 | |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 2428 | npages = get_user_pages_unlocked(addr, 1, &page, flags); |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2429 | if (npages != 1) |
| 2430 | return npages; |
| 2431 | |
| 2432 | /* map read fault as writable if possible */ |
Xiao Guangrong | 12ce13f | 2012-08-21 11:00:49 +0800 | [diff] [blame] | 2433 | if (unlikely(!write_fault) && writable) { |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 2434 | struct page *wpage; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2435 | |
Souptick Joarder | dadbb61 | 2020-06-07 21:40:55 -0700 | [diff] [blame] | 2436 | if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) { |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2437 | *writable = true; |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 2438 | put_page(page); |
| 2439 | page = wpage; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2440 | } |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2441 | } |
Al Viro | ce53053 | 2017-11-19 17:47:33 -0500 | [diff] [blame] | 2442 | *pfn = page_to_pfn(page); |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2443 | return npages; |
| 2444 | } |
| 2445 | |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2446 | static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) |
| 2447 | { |
| 2448 | if (unlikely(!(vma->vm_flags & VM_READ))) |
| 2449 | return false; |
| 2450 | |
| 2451 | if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) |
| 2452 | return false; |
| 2453 | |
| 2454 | return true; |
| 2455 | } |
| 2456 | |
Nicholas Piggin | f8be156 | 2021-06-24 08:29:04 -0400 | [diff] [blame] | 2457 | static int kvm_try_get_pfn(kvm_pfn_t pfn) |
| 2458 | { |
| 2459 | if (kvm_is_reserved_pfn(pfn)) |
| 2460 | return 1; |
| 2461 | return get_page_unless_zero(pfn_to_page(pfn)); |
| 2462 | } |
| 2463 | |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 2464 | static int hva_to_pfn_remapped(struct vm_area_struct *vma, |
Xianting Tian | 1625566 | 2022-01-24 10:04:56 +0800 | [diff] [blame] | 2465 | unsigned long addr, bool write_fault, |
| 2466 | bool *writable, kvm_pfn_t *p_pfn) |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 2467 | { |
Sean Christopherson | a954577 | 2021-02-08 12:19:40 -0800 | [diff] [blame] | 2468 | kvm_pfn_t pfn; |
Paolo Bonzini | bd2fae8 | 2021-02-01 05:12:11 -0500 | [diff] [blame] | 2469 | pte_t *ptep; |
| 2470 | spinlock_t *ptl; |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2471 | int r; |
| 2472 | |
Paolo Bonzini | 9fd6dad | 2021-02-05 05:07:11 -0500 | [diff] [blame] | 2473 | r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2474 | if (r) { |
| 2475 | /* |
| 2476 | * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does |
| 2477 | * not call the fault handler, so do it here. |
| 2478 | */ |
| 2479 | bool unlocked = false; |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 2480 | r = fixup_user_fault(current->mm, addr, |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2481 | (write_fault ? FAULT_FLAG_WRITE : 0), |
| 2482 | &unlocked); |
Paolo Bonzini | a8387d0 | 2020-05-29 05:42:55 -0400 | [diff] [blame] | 2483 | if (unlocked) |
| 2484 | return -EAGAIN; |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2485 | if (r) |
| 2486 | return r; |
| 2487 | |
Paolo Bonzini | 9fd6dad | 2021-02-05 05:07:11 -0500 | [diff] [blame] | 2488 | r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2489 | if (r) |
| 2490 | return r; |
Paolo Bonzini | bd2fae8 | 2021-02-01 05:12:11 -0500 | [diff] [blame] | 2491 | } |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2492 | |
Paolo Bonzini | bd2fae8 | 2021-02-01 05:12:11 -0500 | [diff] [blame] | 2493 | if (write_fault && !pte_write(*ptep)) { |
| 2494 | pfn = KVM_PFN_ERR_RO_FAULT; |
| 2495 | goto out; |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2496 | } |
| 2497 | |
KarimAllah Ahmed | a340b3e | 2018-01-17 19:18:56 +0100 | [diff] [blame] | 2498 | if (writable) |
Paolo Bonzini | bd2fae8 | 2021-02-01 05:12:11 -0500 | [diff] [blame] | 2499 | *writable = pte_write(*ptep); |
| 2500 | pfn = pte_pfn(*ptep); |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2501 | |
| 2502 | /* |
| 2503 | * Get a reference here because callers of *hva_to_pfn* and |
| 2504 | * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the |
| 2505 | * returned pfn. This is only needed if the VMA has VM_MIXEDMAP |
Marc Zyngier | 36c3ce6 | 2021-07-26 16:35:52 +0100 | [diff] [blame] | 2506 | * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2507 | * simply do nothing for reserved pfns. |
| 2508 | * |
| 2509 | * Whoever called remap_pfn_range is also going to call e.g. |
| 2510 | * unmap_mapping_range before the underlying pages are freed, |
| 2511 | * causing a call to our MMU notifier. |
Nicholas Piggin | f8be156 | 2021-06-24 08:29:04 -0400 | [diff] [blame] | 2512 | * |
| 2513 | * Certain IO or PFNMAP mappings can be backed with valid |
| 2514 | * struct pages, but be allocated without refcounting, e.g., |
| 2515 | * tail pages of non-compound higher order allocations, which |
| 2516 | * would then underflow the refcount when the caller does the |
| 2517 | * required put_page. Don't allow those pages here. |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2518 | */ |
Nicholas Piggin | f8be156 | 2021-06-24 08:29:04 -0400 | [diff] [blame] | 2519 | if (!kvm_try_get_pfn(pfn)) |
| 2520 | r = -EFAULT; |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2521 | |
Paolo Bonzini | bd2fae8 | 2021-02-01 05:12:11 -0500 | [diff] [blame] | 2522 | out: |
| 2523 | pte_unmap_unlock(ptep, ptl); |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 2524 | *p_pfn = pfn; |
Nicholas Piggin | f8be156 | 2021-06-24 08:29:04 -0400 | [diff] [blame] | 2525 | |
| 2526 | return r; |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 2527 | } |
| 2528 | |
Xiao Guangrong | 12ce13f | 2012-08-21 11:00:49 +0800 | [diff] [blame] | 2529 | /* |
| 2530 | * Pin guest page in memory and return its pfn. |
| 2531 | * @addr: host virtual address which maps memory to the guest |
| 2532 | * @atomic: whether the call is in atomic context and thus must not sleep |
| 2533 | * @async: whether this function needs to wait for IO to complete if the |
| 2534 | * host page is not in memory |
| 2535 | * @write_fault: whether we should get a writable host page |
| 2536 | * @writable: whether it is allowed to map a writable host page for !@write_fault |
| 2537 | * |
| 2538 | * The function will map a writable host page for these two cases: |
| 2539 | * 1): @write_fault = true |
| 2540 | * 2): @write_fault = false && @writable, @writable will tell the caller |
| 2541 | * whether the mapping is writable. |
| 2542 | */ |
David Woodhouse | 982ed0d | 2021-12-10 16:36:21 +0000 | [diff] [blame] | 2543 | kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, |
| 2544 | bool write_fault, bool *writable) |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2545 | { |
| 2546 | struct vm_area_struct *vma; |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2547 | kvm_pfn_t pfn = 0; |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 2548 | int npages, r; |
Avi Kivity | 954bbbc | 2007-03-30 14:02:32 +0300 | [diff] [blame] | 2549 | |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2550 | /* we can do it either atomically or asynchronously, not both */ |
| 2551 | BUG_ON(atomic && async); |
| 2552 | |
Paolo Bonzini | b9b33da | 2018-07-27 17:44:41 +0200 | [diff] [blame] | 2553 | if (hva_to_pfn_fast(addr, write_fault, writable, &pfn)) |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2554 | return pfn; |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2555 | |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2556 | if (atomic) |
| 2557 | return KVM_PFN_ERR_FAULT; |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2558 | |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2559 | npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); |
| 2560 | if (npages == 1) |
| 2561 | return pfn; |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2562 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2563 | mmap_read_lock(current->mm); |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2564 | if (npages == -EHWPOISON || |
| 2565 | (!async && check_user_page_hwpoison(addr))) { |
| 2566 | pfn = KVM_PFN_ERR_HWPOISON; |
| 2567 | goto exit; |
Xiao Guangrong | 887c08a | 2010-08-22 19:10:28 +0800 | [diff] [blame] | 2568 | } |
Izik Eidus | 539cb66 | 2007-11-11 22:05:04 +0200 | [diff] [blame] | 2569 | |
Paolo Bonzini | a8387d0 | 2020-05-29 05:42:55 -0400 | [diff] [blame] | 2570 | retry: |
Liam Howlett | fc98c03 | 2021-06-28 19:39:17 -0700 | [diff] [blame] | 2571 | vma = vma_lookup(current->mm, addr); |
Anthony Liguori | 8d4e128 | 2007-10-18 09:59:34 -0500 | [diff] [blame] | 2572 | |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2573 | if (vma == NULL) |
| 2574 | pfn = KVM_PFN_ERR_FAULT; |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 2575 | else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { |
Xianting Tian | 1625566 | 2022-01-24 10:04:56 +0800 | [diff] [blame] | 2576 | r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn); |
Paolo Bonzini | a8387d0 | 2020-05-29 05:42:55 -0400 | [diff] [blame] | 2577 | if (r == -EAGAIN) |
| 2578 | goto retry; |
Paolo Bonzini | 92176a8 | 2016-06-07 16:22:47 +0200 | [diff] [blame] | 2579 | if (r < 0) |
| 2580 | pfn = KVM_PFN_ERR_FAULT; |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2581 | } else { |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2582 | if (async && vma_is_valid(vma, write_fault)) |
Xiao Guangrong | 2fc8431 | 2012-08-21 11:00:22 +0800 | [diff] [blame] | 2583 | *async = true; |
| 2584 | pfn = KVM_PFN_ERR_FAULT; |
| 2585 | } |
| 2586 | exit: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2587 | mmap_read_unlock(current->mm); |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2588 | return pfn; |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2589 | } |
| 2590 | |
Ben Gardon | 8283e36 | 2021-11-15 15:45:58 -0800 | [diff] [blame] | 2591 | kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2592 | bool atomic, bool *async, bool write_fault, |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 2593 | bool *writable, hva_t *hva) |
Xiao Guangrong | 887c08a | 2010-08-22 19:10:28 +0800 | [diff] [blame] | 2594 | { |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2595 | unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); |
| 2596 | |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 2597 | if (hva) |
| 2598 | *hva = addr; |
| 2599 | |
Paolo Bonzini | b2740d3 | 2016-02-23 15:36:01 +0100 | [diff] [blame] | 2600 | if (addr == KVM_HVA_ERR_RO_BAD) { |
| 2601 | if (writable) |
| 2602 | *writable = false; |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2603 | return KVM_PFN_ERR_RO_FAULT; |
Paolo Bonzini | b2740d3 | 2016-02-23 15:36:01 +0100 | [diff] [blame] | 2604 | } |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2605 | |
Paolo Bonzini | b2740d3 | 2016-02-23 15:36:01 +0100 | [diff] [blame] | 2606 | if (kvm_is_error_hva(addr)) { |
| 2607 | if (writable) |
| 2608 | *writable = false; |
Xiao Guangrong | 81c52c5 | 2012-10-16 20:10:59 +0800 | [diff] [blame] | 2609 | return KVM_PFN_NOSLOT; |
Paolo Bonzini | b2740d3 | 2016-02-23 15:36:01 +0100 | [diff] [blame] | 2610 | } |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2611 | |
 | 2612 | 	/* Do not map a writable pfn into a read-only memslot. */
| 2613 | if (writable && memslot_is_readonly(slot)) { |
| 2614 | *writable = false; |
| 2615 | writable = NULL; |
| 2616 | } |
| 2617 | |
| 2618 | return hva_to_pfn(addr, atomic, async, write_fault, |
| 2619 | writable); |
Xiao Guangrong | 887c08a | 2010-08-22 19:10:28 +0800 | [diff] [blame] | 2620 | } |
Paolo Bonzini | 3520469 | 2015-04-02 11:20:48 +0200 | [diff] [blame] | 2621 | EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); |
Xiao Guangrong | 887c08a | 2010-08-22 19:10:28 +0800 | [diff] [blame] | 2622 | |
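/*
 * Illustrative sketch (not part of this file): a hypothetical caller
 * resolving a gfn for a write access via __gfn_to_pfn_memslot(), also
 * learning whether the host mapping is writable and which hva backs it.
 */
static kvm_pfn_t demo_resolve_gfn(const struct kvm_memory_slot *slot, gfn_t gfn)
{
	bool writable;
	hva_t hva;
	kvm_pfn_t pfn;

	/* atomic == false may sleep; async == NULL forces a synchronous fault. */
	pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
				   true /* write_fault */, &writable, &hva);
	if (!is_error_noslot_pfn(pfn) && !writable)
		pr_debug("gfn %llx mapped read-only at hva %lx\n", gfn, hva);

	return pfn;
}
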
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2623 | kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2624 | bool *writable) |
| 2625 | { |
Paolo Bonzini | e37afc6 | 2015-05-19 16:09:04 +0200 | [diff] [blame] | 2626 | return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 2627 | write_fault, writable, NULL); |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2628 | } |
| 2629 | EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); |
| 2630 | |
Ben Gardon | 8283e36 | 2021-11-15 15:45:58 -0800 | [diff] [blame] | 2631 | kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) |
Marcelo Tosatti | 506f0d6 | 2009-12-23 14:35:19 -0200 | [diff] [blame] | 2632 | { |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 2633 | return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL); |
Marcelo Tosatti | 506f0d6 | 2009-12-23 14:35:19 -0200 | [diff] [blame] | 2634 | } |
Paolo Bonzini | e37afc6 | 2015-05-19 16:09:04 +0200 | [diff] [blame] | 2635 | EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); |
Marcelo Tosatti | 506f0d6 | 2009-12-23 14:35:19 -0200 | [diff] [blame] | 2636 | |
Ben Gardon | 8283e36 | 2021-11-15 15:45:58 -0800 | [diff] [blame] | 2637 | kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn) |
Xiao Guangrong | 037d92d | 2012-08-21 10:59:12 +0800 | [diff] [blame] | 2638 | { |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 2639 | return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL); |
Xiao Guangrong | 037d92d | 2012-08-21 10:59:12 +0800 | [diff] [blame] | 2640 | } |
| 2641 | EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); |
| 2642 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2643 | kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2644 | { |
| 2645 | return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); |
| 2646 | } |
| 2647 | EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); |
| 2648 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2649 | kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) |
Paolo Bonzini | e37afc6 | 2015-05-19 16:09:04 +0200 | [diff] [blame] | 2650 | { |
| 2651 | return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); |
| 2652 | } |
| 2653 | EXPORT_SYMBOL_GPL(gfn_to_pfn); |
| 2654 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2655 | kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2656 | { |
| 2657 | return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); |
| 2658 | } |
| 2659 | EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); |
| 2660 | |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 2661 | int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2662 | struct page **pages, int nr_pages) |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2663 | { |
| 2664 | unsigned long addr; |
Arnd Bergmann | 076b925 | 2017-08-10 14:14:39 +0200 | [diff] [blame] | 2665 | gfn_t entry = 0; |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2666 | |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 2667 | addr = gfn_to_hva_many(slot, gfn, &entry); |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2668 | if (kvm_is_error_hva(addr)) |
| 2669 | return -1; |
| 2670 | |
| 2671 | if (entry < nr_pages) |
| 2672 | return 0; |
| 2673 | |
Souptick Joarder | dadbb61 | 2020-06-07 21:40:55 -0700 | [diff] [blame] | 2674 | return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); |
Xiao Guangrong | 4898778 | 2010-08-22 19:11:43 +0800 | [diff] [blame] | 2675 | } |
| 2676 | EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); |
| 2677 | |
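/*
 * Illustrative note (not from this file): gfn_to_page_many_atomic() never
 * sleeps, so it can be used in atomic context; a return value smaller than
 * nr_pages (0 when the memslot cannot cover the whole batch, -1 on a bad
 * hva) tells the caller to fall back to a per-page slow path.
 */
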
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2678 | static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2679 | { |
Xiao Guangrong | 81c52c5 | 2012-10-16 20:10:59 +0800 | [diff] [blame] | 2680 | if (is_error_noslot_pfn(pfn)) |
Xiao Guangrong | 6cede2e | 2012-08-03 15:41:22 +0800 | [diff] [blame] | 2681 | return KVM_ERR_PTR_BAD_PAGE; |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2682 | |
Ard Biesheuvel | bf4bea8 | 2014-11-10 08:33:56 +0000 | [diff] [blame] | 2683 | if (kvm_is_reserved_pfn(pfn)) { |
Xiao Guangrong | cb9aaa3 | 2012-08-03 15:42:10 +0800 | [diff] [blame] | 2684 | WARN_ON(1); |
| 2685 | return KVM_ERR_PTR_BAD_PAGE; |
| 2686 | } |
| 2687 | |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2688 | return pfn_to_page(pfn); |
| 2689 | } |
| 2690 | |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2691 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) |
| 2692 | { |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2693 | kvm_pfn_t pfn; |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2694 | |
| 2695 | pfn = gfn_to_pfn(kvm, gfn); |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2696 | |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2697 | return kvm_pfn_to_page(pfn); |
Avi Kivity | 954bbbc | 2007-03-30 14:02:32 +0300 | [diff] [blame] | 2698 | } |
| 2699 | EXPORT_SYMBOL_GPL(gfn_to_page); |
| 2700 | |
David Woodhouse | 357a18a | 2021-11-15 16:50:27 +0000 | [diff] [blame] | 2701 | void kvm_release_pfn(kvm_pfn_t pfn, bool dirty) |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2702 | { |
| 2703 | if (pfn == 0) |
| 2704 | return; |
| 2705 | |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2706 | if (dirty) |
| 2707 | kvm_release_pfn_dirty(pfn); |
| 2708 | else |
| 2709 | kvm_release_pfn_clean(pfn); |
| 2710 | } |
| 2711 | |
David Woodhouse | 357a18a | 2021-11-15 16:50:27 +0000 | [diff] [blame] | 2712 | int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2713 | { |
| 2714 | kvm_pfn_t pfn; |
| 2715 | void *hva = NULL; |
| 2716 | struct page *page = KVM_UNMAPPED_PAGE; |
| 2717 | |
| 2718 | if (!map) |
| 2719 | return -EINVAL; |
| 2720 | |
David Woodhouse | 357a18a | 2021-11-15 16:50:27 +0000 | [diff] [blame] | 2721 | pfn = gfn_to_pfn(vcpu->kvm, gfn); |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2722 | if (is_error_noslot_pfn(pfn)) |
| 2723 | return -EINVAL; |
| 2724 | |
| 2725 | if (pfn_valid(pfn)) { |
| 2726 | page = pfn_to_page(pfn); |
David Woodhouse | 357a18a | 2021-11-15 16:50:27 +0000 | [diff] [blame] | 2727 | hva = kmap(page); |
Paolo Bonzini | d30b214 | 2019-05-20 12:06:36 +0200 | [diff] [blame] | 2728 | #ifdef CONFIG_HAS_IOMEM |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2729 | } else { |
David Woodhouse | 357a18a | 2021-11-15 16:50:27 +0000 | [diff] [blame] | 2730 | hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); |
Paolo Bonzini | d30b214 | 2019-05-20 12:06:36 +0200 | [diff] [blame] | 2731 | #endif |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2732 | } |
| 2733 | |
| 2734 | if (!hva) |
| 2735 | return -EFAULT; |
| 2736 | |
| 2737 | map->page = page; |
| 2738 | map->hva = hva; |
| 2739 | map->pfn = pfn; |
| 2740 | map->gfn = gfn; |
| 2741 | |
| 2742 | return 0; |
| 2743 | } |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2744 | EXPORT_SYMBOL_GPL(kvm_vcpu_map); |
| 2745 | |
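/*
 * Illustrative usage sketch (not part of this file): map a guest frame,
 * patch a byte, and unmap with dirty == true so the page is logged for
 * dirty tracking.  "demo_patch_guest_byte" and its arguments are
 * hypothetical.
 */
static int demo_patch_guest_byte(struct kvm_vcpu *vcpu, gfn_t gfn,
				 unsigned int offset, u8 val)
{
	struct kvm_host_map map;
	int r;

	r = kvm_vcpu_map(vcpu, gfn, &map);
	if (r)
		return r;

	*((u8 *)map.hva + offset) = val;

	/* dirty == true marks the page dirty before releasing the pfn. */
	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}
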
David Woodhouse | 357a18a | 2021-11-15 16:50:27 +0000 | [diff] [blame] | 2746 | void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2747 | { |
| 2748 | if (!map) |
| 2749 | return; |
| 2750 | |
| 2751 | if (!map->hva) |
| 2752 | return; |
| 2753 | |
David Woodhouse | 357a18a | 2021-11-15 16:50:27 +0000 | [diff] [blame] | 2754 | if (map->page != KVM_UNMAPPED_PAGE) |
| 2755 | kunmap(map->page); |
Christian Borntraeger | eb1f2f3 | 2019-05-27 10:28:25 +0200 | [diff] [blame] | 2756 | #ifdef CONFIG_HAS_IOMEM |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2757 | else |
David Woodhouse | 357a18a | 2021-11-15 16:50:27 +0000 | [diff] [blame] | 2758 | memunmap(map->hva); |
Christian Borntraeger | eb1f2f3 | 2019-05-27 10:28:25 +0200 | [diff] [blame] | 2759 | #endif |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2760 | |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2761 | if (dirty) |
David Woodhouse | 357a18a | 2021-11-15 16:50:27 +0000 | [diff] [blame] | 2762 | kvm_vcpu_mark_page_dirty(vcpu, map->gfn); |
Boris Ostrovsky | 9172481 | 2019-12-05 01:30:51 +0000 | [diff] [blame] | 2763 | |
David Woodhouse | 357a18a | 2021-11-15 16:50:27 +0000 | [diff] [blame] | 2764 | kvm_release_pfn(map->pfn, dirty); |
KarimAllah Ahmed | e45adf6 | 2019-01-31 21:24:34 +0100 | [diff] [blame] | 2765 | |
| 2766 | map->hva = NULL; |
| 2767 | map->page = NULL; |
| 2768 | } |
| 2769 | EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); |
| 2770 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2771 | struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 2772 | { |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2773 | kvm_pfn_t pfn; |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2774 | |
| 2775 | pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); |
| 2776 | |
| 2777 | return kvm_pfn_to_page(pfn); |
| 2778 | } |
| 2779 | EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); |
| 2780 | |
Izik Eidus | b4231d6 | 2007-11-20 11:49:33 +0200 | [diff] [blame] | 2781 | void kvm_release_page_clean(struct page *page) |
| 2782 | { |
Xiao Guangrong | 32cad84 | 2012-08-03 15:42:52 +0800 | [diff] [blame] | 2783 | WARN_ON(is_error_page(page)); |
| 2784 | |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2785 | kvm_release_pfn_clean(page_to_pfn(page)); |
Izik Eidus | b4231d6 | 2007-11-20 11:49:33 +0200 | [diff] [blame] | 2786 | } |
| 2787 | EXPORT_SYMBOL_GPL(kvm_release_page_clean); |
| 2788 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2789 | void kvm_release_pfn_clean(kvm_pfn_t pfn) |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2790 | { |
Ard Biesheuvel | bf4bea8 | 2014-11-10 08:33:56 +0000 | [diff] [blame] | 2791 | if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2792 | put_page(pfn_to_page(pfn)); |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2793 | } |
| 2794 | EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); |
| 2795 | |
Izik Eidus | b4231d6 | 2007-11-20 11:49:33 +0200 | [diff] [blame] | 2796 | void kvm_release_page_dirty(struct page *page) |
Izik Eidus | 8a7ae05 | 2007-10-18 11:09:33 +0200 | [diff] [blame] | 2797 | { |
Xiao Guangrong | a276632 | 2012-07-26 11:58:59 +0800 | [diff] [blame] | 2798 | WARN_ON(is_error_page(page)); |
| 2799 | |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2800 | kvm_release_pfn_dirty(page_to_pfn(page)); |
Izik Eidus | 8a7ae05 | 2007-10-18 11:09:33 +0200 | [diff] [blame] | 2801 | } |
Izik Eidus | b4231d6 | 2007-11-20 11:49:33 +0200 | [diff] [blame] | 2802 | EXPORT_SYMBOL_GPL(kvm_release_page_dirty); |
Izik Eidus | 8a7ae05 | 2007-10-18 11:09:33 +0200 | [diff] [blame] | 2803 | |
David Hildenbrand | f7a6509 | 2017-09-01 17:11:43 +0200 | [diff] [blame] | 2804 | void kvm_release_pfn_dirty(kvm_pfn_t pfn) |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2805 | { |
| 2806 | kvm_set_pfn_dirty(pfn); |
| 2807 | kvm_release_pfn_clean(pfn); |
| 2808 | } |
David Hildenbrand | f7a6509 | 2017-09-01 17:11:43 +0200 | [diff] [blame] | 2809 | EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2810 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2811 | void kvm_set_pfn_dirty(kvm_pfn_t pfn) |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2812 | { |
Miaohe Lin | d29c03a | 2019-12-05 11:05:05 +0800 | [diff] [blame] | 2813 | if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) |
| 2814 | SetPageDirty(pfn_to_page(pfn)); |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2815 | } |
| 2816 | EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); |
| 2817 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2818 | void kvm_set_pfn_accessed(kvm_pfn_t pfn) |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2819 | { |
Sean Christopherson | a78986a | 2019-11-11 14:12:27 -0800 | [diff] [blame] | 2820 | if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) |
Anthony Liguori | 2e2e373 | 2008-04-30 15:37:07 -0500 | [diff] [blame] | 2821 | mark_page_accessed(pfn_to_page(pfn)); |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2822 | } |
| 2823 | EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); |
| 2824 | |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2825 | static int next_segment(unsigned long len, int offset) |
| 2826 | { |
| 2827 | if (len > PAGE_SIZE - offset) |
| 2828 | return PAGE_SIZE - offset; |
| 2829 | else |
| 2830 | return len; |
| 2831 | } |
| 2832 | |
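/*
 * Worked example (illustrative): with PAGE_SIZE == 4096, a 5000-byte
 * access starting at page offset 3000 is split by next_segment() into a
 * 1096-byte segment (the rest of the first page) followed by a 3904-byte
 * segment; the read/write loops below advance offset, len and gfn in
 * exactly this way.
 */
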
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2833 | static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2834 | void *data, int offset, int len) |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2835 | { |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2836 | int r; |
| 2837 | unsigned long addr; |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2838 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2839 | addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2840 | if (kvm_is_error_hva(addr)) |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2841 | return -EFAULT; |
Paolo Bonzini | 3180a7f | 2015-04-02 14:08:20 +0200 | [diff] [blame] | 2842 | r = __copy_from_user(data, (void __user *)addr + offset, len); |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2843 | if (r) |
| 2844 | return -EFAULT; |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2845 | return 0; |
| 2846 | } |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2847 | |
| 2848 | int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, |
| 2849 | int len) |
| 2850 | { |
| 2851 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); |
| 2852 | |
| 2853 | return __kvm_read_guest_page(slot, gfn, data, offset, len); |
| 2854 | } |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2855 | EXPORT_SYMBOL_GPL(kvm_read_guest_page); |
| 2856 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2857 | int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, |
| 2858 | int offset, int len) |
| 2859 | { |
| 2860 | struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 2861 | |
| 2862 | return __kvm_read_guest_page(slot, gfn, data, offset, len); |
| 2863 | } |
| 2864 | EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); |
| 2865 | |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2866 | int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) |
| 2867 | { |
| 2868 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2869 | int seg; |
| 2870 | int offset = offset_in_page(gpa); |
| 2871 | int ret; |
| 2872 | |
| 2873 | while ((seg = next_segment(len, offset)) != 0) { |
| 2874 | ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); |
| 2875 | if (ret < 0) |
| 2876 | return ret; |
| 2877 | offset = 0; |
| 2878 | len -= seg; |
| 2879 | data += seg; |
| 2880 | ++gfn; |
| 2881 | } |
| 2882 | return 0; |
| 2883 | } |
| 2884 | EXPORT_SYMBOL_GPL(kvm_read_guest); |
| 2885 | |
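/*
 * Illustrative usage (not part of this file): read a guest-physical
 * structure that may straddle a page boundary; kvm_read_guest() hides the
 * per-page segmentation.  "struct demo_hdr" and the caller are
 * hypothetical.
 */
struct demo_hdr {
	u32 magic;
	u32 version;
};

static int demo_read_hdr(struct kvm *kvm, gpa_t gpa, struct demo_hdr *hdr)
{
	return kvm_read_guest(kvm, gpa, hdr, sizeof(*hdr));
}
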
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2886 | int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) |
| 2887 | { |
| 2888 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2889 | int seg; |
| 2890 | int offset = offset_in_page(gpa); |
| 2891 | int ret; |
| 2892 | |
| 2893 | while ((seg = next_segment(len, offset)) != 0) { |
| 2894 | ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); |
| 2895 | if (ret < 0) |
| 2896 | return ret; |
| 2897 | offset = 0; |
| 2898 | len -= seg; |
| 2899 | data += seg; |
| 2900 | ++gfn; |
| 2901 | } |
| 2902 | return 0; |
| 2903 | } |
| 2904 | EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); |
| 2905 | |
| 2906 | static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, |
| 2907 | void *data, int offset, unsigned long len) |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2908 | { |
| 2909 | int r; |
| 2910 | unsigned long addr; |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2911 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2912 | addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2913 | if (kvm_is_error_hva(addr)) |
| 2914 | return -EFAULT; |
Andrea Arcangeli | 0aac03f | 2008-01-30 19:57:35 +0100 | [diff] [blame] | 2915 | pagefault_disable(); |
Paolo Bonzini | 3180a7f | 2015-04-02 14:08:20 +0200 | [diff] [blame] | 2916 | r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); |
Andrea Arcangeli | 0aac03f | 2008-01-30 19:57:35 +0100 | [diff] [blame] | 2917 | pagefault_enable(); |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2918 | if (r) |
| 2919 | return -EFAULT; |
| 2920 | return 0; |
| 2921 | } |
Marcelo Tosatti | 7ec5458 | 2007-12-20 19:18:23 -0500 | [diff] [blame] | 2922 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2923 | int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, |
| 2924 | void *data, unsigned long len) |
| 2925 | { |
| 2926 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2927 | struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 2928 | int offset = offset_in_page(gpa); |
| 2929 | |
| 2930 | return __kvm_read_guest_atomic(slot, gfn, data, offset, len); |
| 2931 | } |
| 2932 | EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); |
| 2933 | |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2934 | static int __kvm_write_guest_page(struct kvm *kvm, |
| 2935 | struct kvm_memory_slot *memslot, gfn_t gfn, |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2936 | const void *data, int offset, int len) |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2937 | { |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2938 | int r; |
| 2939 | unsigned long addr; |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2940 | |
Radim Krčmář | 251eb84 | 2015-04-10 21:47:27 +0200 | [diff] [blame] | 2941 | addr = gfn_to_hva_memslot(memslot, gfn); |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2942 | if (kvm_is_error_hva(addr)) |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2943 | return -EFAULT; |
Xiao Guangrong | 8b0cedf | 2011-05-15 23:22:04 +0800 | [diff] [blame] | 2944 | r = __copy_to_user((void __user *)addr + offset, data, len); |
Izik Eidus | e0506bc | 2007-11-11 22:10:22 +0200 | [diff] [blame] | 2945 | if (r) |
| 2946 | return -EFAULT; |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2947 | mark_page_dirty_in_slot(kvm, memslot, gfn); |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2948 | return 0; |
| 2949 | } |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2950 | |
| 2951 | int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, |
| 2952 | const void *data, int offset, int len) |
| 2953 | { |
| 2954 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); |
| 2955 | |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2956 | return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2957 | } |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2958 | EXPORT_SYMBOL_GPL(kvm_write_guest_page); |
| 2959 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2960 | int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 2961 | const void *data, int offset, int len) |
| 2962 | { |
| 2963 | struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 2964 | |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 2965 | return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2966 | } |
| 2967 | EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); |
| 2968 | |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2969 | int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, |
| 2970 | unsigned long len) |
| 2971 | { |
| 2972 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2973 | int seg; |
| 2974 | int offset = offset_in_page(gpa); |
| 2975 | int ret; |
| 2976 | |
| 2977 | while ((seg = next_segment(len, offset)) != 0) { |
| 2978 | ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); |
| 2979 | if (ret < 0) |
| 2980 | return ret; |
| 2981 | offset = 0; |
| 2982 | len -= seg; |
| 2983 | data += seg; |
| 2984 | ++gfn; |
| 2985 | } |
| 2986 | return 0; |
| 2987 | } |
Wincy Van | ff651cb | 2014-12-11 08:52:58 +0300 | [diff] [blame] | 2988 | EXPORT_SYMBOL_GPL(kvm_write_guest); |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 2989 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 2990 | int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, |
| 2991 | unsigned long len) |
| 2992 | { |
| 2993 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2994 | int seg; |
| 2995 | int offset = offset_in_page(gpa); |
| 2996 | int ret; |
| 2997 | |
| 2998 | while ((seg = next_segment(len, offset)) != 0) { |
| 2999 | ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); |
| 3000 | if (ret < 0) |
| 3001 | return ret; |
| 3002 | offset = 0; |
| 3003 | len -= seg; |
| 3004 | data += seg; |
| 3005 | ++gfn; |
| 3006 | } |
| 3007 | return 0; |
| 3008 | } |
| 3009 | EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); |
| 3010 | |
Paolo Bonzini | 5a2d436 | 2017-02-03 20:32:28 -0800 | [diff] [blame] | 3011 | static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, |
| 3012 | struct gfn_to_hva_cache *ghc, |
| 3013 | gpa_t gpa, unsigned long len) |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3014 | { |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3015 | int offset = offset_in_page(gpa); |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 3016 | gfn_t start_gfn = gpa >> PAGE_SHIFT; |
| 3017 | gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; |
| 3018 | gfn_t nr_pages_needed = end_gfn - start_gfn + 1; |
| 3019 | gfn_t nr_pages_avail; |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3020 | |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 3021 | /* Update ghc->generation before performing any error checks. */ |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3022 | ghc->generation = slots->generation; |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 3023 | |
| 3024 | if (start_gfn > end_gfn) { |
| 3025 | ghc->hva = KVM_HVA_ERR_BAD; |
| 3026 | return -EINVAL; |
| 3027 | } |
Jim Mattson | f1b9dd5 | 2018-12-17 13:53:33 -0800 | [diff] [blame] | 3028 | |
| 3029 | /* |
 | 3030 | 	 * If the requested region spans multiple memslots, we still
| 3031 | * verify that the entire region is valid here. |
| 3032 | */ |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 3033 | for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { |
Jim Mattson | f1b9dd5 | 2018-12-17 13:53:33 -0800 | [diff] [blame] | 3034 | ghc->memslot = __gfn_to_memslot(slots, start_gfn); |
| 3035 | ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, |
| 3036 | &nr_pages_avail); |
| 3037 | if (kvm_is_error_hva(ghc->hva)) |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 3038 | return -EFAULT; |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 3039 | } |
Jim Mattson | f1b9dd5 | 2018-12-17 13:53:33 -0800 | [diff] [blame] | 3040 | |
 | 3041 | 	/* Use the slow path for cross-page reads and writes. */
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 3042 | if (nr_pages_needed == 1) |
Jim Mattson | f1b9dd5 | 2018-12-17 13:53:33 -0800 | [diff] [blame] | 3043 | ghc->hva += offset; |
| 3044 | else |
| 3045 | ghc->memslot = NULL; |
| 3046 | |
Sean Christopherson | 6ad1e29 | 2020-01-09 14:58:55 -0500 | [diff] [blame] | 3047 | ghc->gpa = gpa; |
| 3048 | ghc->len = len; |
| 3049 | return 0; |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3050 | } |
Paolo Bonzini | 5a2d436 | 2017-02-03 20:32:28 -0800 | [diff] [blame] | 3051 | |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3052 | int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
Paolo Bonzini | 5a2d436 | 2017-02-03 20:32:28 -0800 | [diff] [blame] | 3053 | gpa_t gpa, unsigned long len) |
| 3054 | { |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3055 | struct kvm_memslots *slots = kvm_memslots(kvm); |
Paolo Bonzini | 5a2d436 | 2017-02-03 20:32:28 -0800 | [diff] [blame] | 3056 | return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); |
| 3057 | } |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3058 | EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3059 | |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3060 | int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
Jim Mattson | 7a86dab | 2018-12-14 14:34:43 -0800 | [diff] [blame] | 3061 | void *data, unsigned int offset, |
| 3062 | unsigned long len) |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3063 | { |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3064 | struct kvm_memslots *slots = kvm_memslots(kvm); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3065 | int r; |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 3066 | gpa_t gpa = ghc->gpa + offset; |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3067 | |
Paolo Bonzini | 5f25e71 | 2021-11-22 18:24:01 -0500 | [diff] [blame] | 3068 | if (WARN_ON_ONCE(len + offset > ghc->len)) |
| 3069 | return -EINVAL; |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 3070 | |
Sean Christopherson | dc9ce71 | 2020-01-09 15:56:20 -0800 | [diff] [blame] | 3071 | if (slots->generation != ghc->generation) { |
| 3072 | if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) |
| 3073 | return -EFAULT; |
| 3074 | } |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 3075 | |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3076 | if (kvm_is_error_hva(ghc->hva)) |
| 3077 | return -EFAULT; |
| 3078 | |
Sean Christopherson | fcfbc61 | 2020-01-09 15:56:18 -0800 | [diff] [blame] | 3079 | if (unlikely(!ghc->memslot)) |
| 3080 | return kvm_write_guest(kvm, gpa, data, len); |
| 3081 | |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 3082 | r = __copy_to_user((void __user *)ghc->hva + offset, data, len); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3083 | if (r) |
| 3084 | return -EFAULT; |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 3085 | mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3086 | |
| 3087 | return 0; |
| 3088 | } |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3089 | EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 3090 | |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3091 | int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| 3092 | void *data, unsigned long len) |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 3093 | { |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3094 | return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); |
Pan Xinhui | 4ec6e86 | 2016-11-02 05:08:34 -0400 | [diff] [blame] | 3095 | } |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3096 | EXPORT_SYMBOL_GPL(kvm_write_guest_cached); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3097 | |
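/*
 * Illustrative pattern (not part of this file): initialize a
 * gfn_to_hva_cache once for a fixed guest buffer, then write through it
 * on a hot path without re-resolving the memslot each time.  The helper
 * below is hypothetical.
 */
static int demo_publish_u64(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			    gpa_t gpa, u64 value)
{
	int r;

	/* Slow path, once: validates gpa..gpa+len and caches the hva. */
	r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(value));
	if (r)
		return r;

	/* Fast path: only revalidates if the memslot generation changed. */
	return kvm_write_guest_cached(kvm, ghc, &value, sizeof(value));
}
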
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 3098 | int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| 3099 | void *data, unsigned int offset, |
| 3100 | unsigned long len) |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 3101 | { |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3102 | struct kvm_memslots *slots = kvm_memslots(kvm); |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 3103 | int r; |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 3104 | gpa_t gpa = ghc->gpa + offset; |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 3105 | |
Paolo Bonzini | 5f25e71 | 2021-11-22 18:24:01 -0500 | [diff] [blame] | 3106 | if (WARN_ON_ONCE(len + offset > ghc->len)) |
| 3107 | return -EINVAL; |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 3108 | |
Sean Christopherson | dc9ce71 | 2020-01-09 15:56:20 -0800 | [diff] [blame] | 3109 | if (slots->generation != ghc->generation) { |
| 3110 | if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) |
| 3111 | return -EFAULT; |
| 3112 | } |
Andrew Honig | 8f96452 | 2013-03-29 09:35:21 -0700 | [diff] [blame] | 3113 | |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 3114 | if (kvm_is_error_hva(ghc->hva)) |
| 3115 | return -EFAULT; |
| 3116 | |
Sean Christopherson | fcfbc61 | 2020-01-09 15:56:18 -0800 | [diff] [blame] | 3117 | if (unlikely(!ghc->memslot)) |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 3118 | return kvm_read_guest(kvm, gpa, data, len); |
Sean Christopherson | fcfbc61 | 2020-01-09 15:56:18 -0800 | [diff] [blame] | 3119 | |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 3120 | r = __copy_from_user(data, (void __user *)ghc->hva + offset, len); |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 3121 | if (r) |
| 3122 | return -EFAULT; |
| 3123 | |
| 3124 | return 0; |
| 3125 | } |
Vitaly Kuznetsov | 0958f0c | 2020-05-25 16:41:19 +0200 | [diff] [blame] | 3126 | EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached); |
| 3127 | |
| 3128 | int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| 3129 | void *data, unsigned long len) |
| 3130 | { |
| 3131 | return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); |
| 3132 | } |
Paolo Bonzini | 4e335d9 | 2017-05-02 16:20:18 +0200 | [diff] [blame] | 3133 | EXPORT_SYMBOL_GPL(kvm_read_guest_cached); |
Gleb Natapov | e03b644 | 2011-07-11 15:28:11 -0400 | [diff] [blame] | 3134 | |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 3135 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) |
| 3136 | { |
Paolo Bonzini | 2f54144 | 2020-11-06 05:25:09 -0500 | [diff] [blame] | 3137 | const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); |
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 3138 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 3139 | int seg; |
| 3140 | int offset = offset_in_page(gpa); |
| 3141 | int ret; |
| 3142 | |
Kevin Mulvey | bfda0e8 | 2015-02-20 08:21:36 -0500 | [diff] [blame] | 3143 | while ((seg = next_segment(len, offset)) != 0) { |
Paolo Bonzini | 2f54144 | 2020-11-06 05:25:09 -0500 | [diff] [blame] | 3144 | 		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
Izik Eidus | 195aefd | 2007-10-01 22:14:18 +0200 | [diff] [blame] | 3145 | if (ret < 0) |
| 3146 | return ret; |
| 3147 | offset = 0; |
| 3148 | len -= seg; |
| 3149 | ++gfn; |
| 3150 | } |
| 3151 | return 0; |
| 3152 | } |
| 3153 | EXPORT_SYMBOL_GPL(kvm_clear_guest); |
| 3154 | |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 3155 | void mark_page_dirty_in_slot(struct kvm *kvm, |
Ben Gardon | 8283e36 | 2021-11-15 15:45:58 -0800 | [diff] [blame] | 3156 | const struct kvm_memory_slot *memslot, |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 3157 | gfn_t gfn) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3158 | { |
David Woodhouse | 2efd61a | 2021-12-10 16:36:20 +0000 | [diff] [blame] | 3159 | struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); |
| 3160 | |
Christian Borntraeger | e09fccb | 2022-01-13 13:29:24 +0100 | [diff] [blame] | 3161 | #ifdef CONFIG_HAVE_KVM_DIRTY_RING |
David Woodhouse | 2efd61a | 2021-12-10 16:36:20 +0000 | [diff] [blame] | 3162 | if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm)) |
| 3163 | return; |
Christian Borntraeger | e09fccb | 2022-01-13 13:29:24 +0100 | [diff] [blame] | 3164 | #endif |
David Woodhouse | 2efd61a | 2021-12-10 16:36:20 +0000 | [diff] [blame] | 3165 | |
Peter Xu | 044c59c | 2020-09-30 21:22:26 -0400 | [diff] [blame] | 3166 | if (memslot && kvm_slot_dirty_track_enabled(memslot)) { |
Rusty Russell | 7e9d619 | 2007-07-31 20:41:14 +1000 | [diff] [blame] | 3167 | unsigned long rel_gfn = gfn - memslot->base_gfn; |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3168 | u32 slot = (memslot->as_id << 16) | memslot->id; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3169 | |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3170 | if (kvm->dirty_ring_size) |
David Woodhouse | 2efd61a | 2021-12-10 16:36:20 +0000 | [diff] [blame] | 3171 | kvm_dirty_ring_push(&vcpu->dirty_ring, |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3172 | slot, rel_gfn); |
| 3173 | else |
| 3174 | set_bit_le(rel_gfn, memslot->dirty_bitmap); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3175 | } |
| 3176 | } |
Ben Gardon | a6a0b05 | 2020-10-14 11:26:55 -0700 | [diff] [blame] | 3177 | EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3178 | |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3179 | void mark_page_dirty(struct kvm *kvm, gfn_t gfn) |
| 3180 | { |
| 3181 | struct kvm_memory_slot *memslot; |
| 3182 | |
| 3183 | memslot = gfn_to_memslot(kvm, gfn); |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 3184 | mark_page_dirty_in_slot(kvm, memslot, gfn); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3185 | } |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 3186 | EXPORT_SYMBOL_GPL(mark_page_dirty); |
Gleb Natapov | 49c7754 | 2010-10-18 15:22:23 +0200 | [diff] [blame] | 3187 | |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 3188 | void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 3189 | { |
| 3190 | struct kvm_memory_slot *memslot; |
| 3191 | |
| 3192 | memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
Peter Xu | 28bd726 | 2020-09-30 21:20:34 -0400 | [diff] [blame] | 3193 | mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); |
Paolo Bonzini | 8e73485 | 2015-05-17 13:58:53 +0200 | [diff] [blame] | 3194 | } |
| 3195 | EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); |
| 3196 | |
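/*
 * Illustrative note (not from this file): the kvm_write_guest*() helpers
 * above already call mark_page_dirty_in_slot(); only code that writes
 * guest memory through a raw hva or pfn mapping needs to log the page by
 * hand, e.g. (hypothetical):
 *
 *	*(u8 *)hva = 0xff;                        // raw write
 *	kvm_vcpu_mark_page_dirty(vcpu, gfn);      // log it explicitly
 */
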
Jan H. Schönherr | 20b7035 | 2017-11-24 22:39:01 +0100 | [diff] [blame] | 3197 | void kvm_sigset_activate(struct kvm_vcpu *vcpu) |
| 3198 | { |
| 3199 | if (!vcpu->sigset_active) |
| 3200 | return; |
| 3201 | |
| 3202 | /* |
| 3203 | * This does a lockless modification of ->real_blocked, which is fine |
 | 3204 | 	 * because only current can change ->real_blocked, and all readers of
 | 3205 | 	 * ->real_blocked don't care as long as ->real_blocked is always a subset
 | 3206 | 	 * of ->blocked.
| 3207 | */ |
| 3208 | sigprocmask(SIG_SETMASK, &vcpu->sigset, ¤t->real_blocked); |
| 3209 | } |
| 3210 | |
| 3211 | void kvm_sigset_deactivate(struct kvm_vcpu *vcpu) |
| 3212 | { |
| 3213 | if (!vcpu->sigset_active) |
| 3214 | return; |
| 3215 | |
| 3216 | sigprocmask(SIG_SETMASK, ¤t->real_blocked, NULL); |
| 3217 | sigemptyset(¤t->real_blocked); |
| 3218 | } |
| 3219 | |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3220 | static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) |
| 3221 | { |
Nir Weiner | dee339b | 2019-01-27 12:17:16 +0200 | [diff] [blame] | 3222 | unsigned int old, val, grow, grow_start; |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3223 | |
Wanpeng Li | 2cbd782 | 2015-09-03 22:07:39 +0800 | [diff] [blame] | 3224 | old = val = vcpu->halt_poll_ns; |
Nir Weiner | dee339b | 2019-01-27 12:17:16 +0200 | [diff] [blame] | 3225 | grow_start = READ_ONCE(halt_poll_ns_grow_start); |
Christian Borntraeger | 6b6de68 | 2016-02-09 13:47:55 +0100 | [diff] [blame] | 3226 | grow = READ_ONCE(halt_poll_ns_grow); |
Nir Weiner | 7fa08e7 | 2019-01-27 12:17:14 +0200 | [diff] [blame] | 3227 | if (!grow) |
| 3228 | goto out; |
| 3229 | |
Nir Weiner | dee339b | 2019-01-27 12:17:16 +0200 | [diff] [blame] | 3230 | val *= grow; |
| 3231 | if (val < grow_start) |
| 3232 | val = grow_start; |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3233 | |
David Matlack | 258785e | 2021-05-06 15:24:43 +0000 | [diff] [blame] | 3234 | if (val > vcpu->kvm->max_halt_poll_ns) |
| 3235 | val = vcpu->kvm->max_halt_poll_ns; |
David Matlack | 313f636 | 2016-03-08 16:19:44 -0800 | [diff] [blame] | 3236 | |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3237 | vcpu->halt_poll_ns = val; |
Nir Weiner | 7fa08e7 | 2019-01-27 12:17:14 +0200 | [diff] [blame] | 3238 | out: |
Wanpeng Li | 2cbd782 | 2015-09-03 22:07:39 +0800 | [diff] [blame] | 3239 | trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3240 | } |
| 3241 | |
| 3242 | static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) |
| 3243 | { |
Sergey Senozhatsky | ae232ea | 2021-09-02 12:11:00 +0900 | [diff] [blame] | 3244 | unsigned int old, val, shrink, grow_start; |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3245 | |
Wanpeng Li | 2cbd782 | 2015-09-03 22:07:39 +0800 | [diff] [blame] | 3246 | old = val = vcpu->halt_poll_ns; |
Christian Borntraeger | 6b6de68 | 2016-02-09 13:47:55 +0100 | [diff] [blame] | 3247 | shrink = READ_ONCE(halt_poll_ns_shrink); |
Sergey Senozhatsky | ae232ea | 2021-09-02 12:11:00 +0900 | [diff] [blame] | 3248 | grow_start = READ_ONCE(halt_poll_ns_grow_start); |
Christian Borntraeger | 6b6de68 | 2016-02-09 13:47:55 +0100 | [diff] [blame] | 3249 | if (shrink == 0) |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3250 | val = 0; |
| 3251 | else |
Christian Borntraeger | 6b6de68 | 2016-02-09 13:47:55 +0100 | [diff] [blame] | 3252 | val /= shrink; |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3253 | |
Sergey Senozhatsky | ae232ea | 2021-09-02 12:11:00 +0900 | [diff] [blame] | 3254 | if (val < grow_start) |
| 3255 | val = 0; |
| 3256 | |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3257 | vcpu->halt_poll_ns = val; |
Wanpeng Li | 2cbd782 | 2015-09-03 22:07:39 +0800 | [diff] [blame] | 3258 | trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3259 | } |
| 3260 | |
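/*
 * Worked example (illustrative, assuming the usual module-parameter
 * defaults of halt_poll_ns_grow == 2 and halt_poll_ns_grow_start == 10000):
 * a vCPU that keeps halting for short periods grows its polling window
 * 0 -> 10000 -> 20000 -> 40000 ns, capped at kvm->max_halt_poll_ns, while
 * the default halt_poll_ns_shrink == 0 makes shrink_halt_poll_ns() reset
 * the window straight back to 0.
 */
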
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3261 | static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) |
| 3262 | { |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 3263 | int ret = -EINTR; |
| 3264 | int idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 3265 | |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3266 | if (kvm_arch_vcpu_runnable(vcpu)) { |
| 3267 | kvm_make_request(KVM_REQ_UNHALT, vcpu); |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 3268 | goto out; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3269 | } |
| 3270 | if (kvm_cpu_has_pending_timer(vcpu)) |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 3271 | goto out; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3272 | if (signal_pending(current)) |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 3273 | goto out; |
Marcelo Tosatti | 084071d | 2021-05-25 10:41:17 -0300 | [diff] [blame] | 3274 | if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) |
| 3275 | goto out; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3276 | |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 3277 | ret = 0; |
| 3278 | out: |
| 3279 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 3280 | return ret; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3281 | } |
| 3282 | |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3283 | /* |
Sean Christopherson | fac4268 | 2021-10-08 19:12:07 -0700 | [diff] [blame] | 3284 | * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is |
| 3285 | * pending. This is mostly used when halting a vCPU, but may also be used |
| 3286 | * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI. |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3287 | */ |
Sean Christopherson | fac4268 | 2021-10-08 19:12:07 -0700 | [diff] [blame] | 3288 | bool kvm_vcpu_block(struct kvm_vcpu *vcpu) |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3289 | { |
Sean Christopherson | fac4268 | 2021-10-08 19:12:07 -0700 | [diff] [blame] | 3290 | struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3291 | bool waited = false; |
Sean Christopherson | fac4268 | 2021-10-08 19:12:07 -0700 | [diff] [blame] | 3292 | |
Jing Zhang | c385833 | 2021-10-08 19:12:08 -0700 | [diff] [blame] | 3293 | vcpu->stat.generic.blocking = 1; |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3294 | |
Marc Zyngier | 07ab0f8 | 2019-08-02 11:37:09 +0100 | [diff] [blame] | 3295 | kvm_arch_vcpu_blocking(vcpu); |
| 3296 | |
Sean Christopherson | fac4268 | 2021-10-08 19:12:07 -0700 | [diff] [blame] | 3297 | prepare_to_rcuwait(wait); |
Marcelo Tosatti | e5c239c | 2008-05-08 19:47:01 -0300 | [diff] [blame] | 3298 | for (;;) { |
Davidlohr Bueso | da4ad88 | 2020-04-23 22:48:37 -0700 | [diff] [blame] | 3299 | set_current_state(TASK_INTERRUPTIBLE); |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3300 | |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3301 | if (kvm_vcpu_check_block(vcpu) < 0) |
Marcelo Tosatti | e5c239c | 2008-05-08 19:47:01 -0300 | [diff] [blame] | 3302 | break; |
| 3303 | |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3304 | waited = true; |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3305 | schedule(); |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3306 | } |
Sean Christopherson | fac4268 | 2021-10-08 19:12:07 -0700 | [diff] [blame] | 3307 | finish_rcuwait(wait); |
| 3308 | |
| 3309 | kvm_arch_vcpu_unblocking(vcpu); |
| 3310 | |
Jing Zhang | c385833 | 2021-10-08 19:12:08 -0700 | [diff] [blame] | 3311 | vcpu->stat.generic.blocking = 0; |
| 3312 | |
Sean Christopherson | fac4268 | 2021-10-08 19:12:07 -0700 | [diff] [blame] | 3313 | return waited; |
| 3314 | } |
| 3315 | |
Sean Christopherson | 29e7289 | 2021-10-08 19:11:59 -0700 | [diff] [blame] | 3316 | static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, |
| 3317 | ktime_t end, bool success) |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3318 | { |
Sean Christopherson | 30c94347 | 2021-10-08 19:12:00 -0700 | [diff] [blame] | 3319 | struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic; |
Sean Christopherson | 29e7289 | 2021-10-08 19:11:59 -0700 | [diff] [blame] | 3320 | u64 poll_ns = ktime_to_ns(ktime_sub(end, start)); |
| 3321 | |
Sean Christopherson | 30c94347 | 2021-10-08 19:12:00 -0700 | [diff] [blame] | 3322 | ++vcpu->stat.generic.halt_attempted_poll; |
| 3323 | |
| 3324 | if (success) { |
| 3325 | ++vcpu->stat.generic.halt_successful_poll; |
| 3326 | |
| 3327 | if (!vcpu_valid_wakeup(vcpu)) |
| 3328 | ++vcpu->stat.generic.halt_poll_invalid; |
| 3329 | |
| 3330 | stats->halt_poll_success_ns += poll_ns; |
| 3331 | KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns); |
| 3332 | } else { |
| 3333 | stats->halt_poll_fail_ns += poll_ns; |
| 3334 | KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns); |
| 3335 | } |
Marcelo Tosatti | e5c239c | 2008-05-08 19:47:01 -0300 | [diff] [blame] | 3336 | } |
Eddie Dong | b6958ce | 2007-07-18 12:15:21 +0300 | [diff] [blame] | 3337 | |
Sean Christopherson | fac4268 | 2021-10-08 19:12:07 -0700 | [diff] [blame] | 3338 | /* |
| 3339 | * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt |
| 3340 | * polling is enabled, busy wait for a short time before blocking to avoid the |
| 3341 | * expensive block+unblock sequence if a wake event arrives soon after the vCPU |
| 3342 | * is halted. |
| 3343 | */ |
Sean Christopherson | 91b99ea | 2021-10-08 19:12:06 -0700 | [diff] [blame] | 3344 | void kvm_vcpu_halt(struct kvm_vcpu *vcpu) |
Yaozu Dong | 3fca036 | 2007-04-25 16:49:19 +0300 | [diff] [blame] | 3345 | { |
Sean Christopherson | 6f39091 | 2021-10-08 19:11:56 -0700 | [diff] [blame] | 3346 | bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); |
Sean Christopherson | 8df6a61 | 2021-10-08 19:11:58 -0700 | [diff] [blame] | 3347 | bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3348 | ktime_t start, cur, poll_end; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3349 | bool waited = false; |
Sean Christopherson | 91b99ea | 2021-10-08 19:12:06 -0700 | [diff] [blame] | 3350 | u64 halt_ns; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3351 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3352 | start = cur = poll_end = ktime_get(); |
Sean Christopherson | 8df6a61 | 2021-10-08 19:11:58 -0700 | [diff] [blame] | 3353 | if (do_halt_poll) { |
Sean Christopherson | 109a982 | 2021-10-08 19:12:09 -0700 | [diff] [blame] | 3354 | ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns); |
Zhai, Edwin | d255f4f | 2009-10-09 18:03:20 +0800 | [diff] [blame] | 3355 | |
Zhai, Edwin | d255f4f | 2009-10-09 18:03:20 +0800 | [diff] [blame] | 3356 | do { |
| 3357 | /* |
| 3358 | * This sets KVM_REQ_UNHALT if an interrupt |
| 3359 | * arrives. |
| 3360 | */ |
Sean Christopherson | 30c94347 | 2021-10-08 19:12:00 -0700 | [diff] [blame] | 3361 | if (kvm_vcpu_check_block(vcpu) < 0) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3362 | goto out; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3363 | cpu_relax(); |
| 3364 | poll_end = cur = ktime_get(); |
| 3365 | } while (kvm_vcpu_can_poll(cur, stop)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3366 | } |
| 3367 | |
Sean Christopherson | fac4268 | 2021-10-08 19:12:07 -0700 | [diff] [blame] | 3368 | waited = kvm_vcpu_block(vcpu); |
Sean Christopherson | f6c60d0 | 2021-10-08 19:12:04 -0700 | [diff] [blame] | 3369 | |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3370 | cur = ktime_get(); |
Jing Zhang | 87bcc5f | 2021-08-02 16:56:32 +0000 | [diff] [blame] | 3371 | if (waited) { |
| 3372 | vcpu->stat.generic.halt_wait_ns += |
| 3373 | ktime_to_ns(cur) - ktime_to_ns(poll_end); |
Jing Zhang | 8ccba53 | 2021-08-02 16:56:33 +0000 | [diff] [blame] | 3374 | KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, |
| 3375 | ktime_to_ns(cur) - ktime_to_ns(poll_end)); |
Jing Zhang | 87bcc5f | 2021-08-02 16:56:32 +0000 | [diff] [blame] | 3376 | } |
Paolo Bonzini | f781951 | 2015-02-04 18:20:58 +0100 | [diff] [blame] | 3377 | out: |
Sean Christopherson | 91b99ea | 2021-10-08 19:12:06 -0700 | [diff] [blame] | 3378 | /* The total time the vCPU was "halted", including polling time. */ |
| 3379 | halt_ns = ktime_to_ns(cur) - ktime_to_ns(start); |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3380 | |
Sean Christopherson | 29e7289 | 2021-10-08 19:11:59 -0700 | [diff] [blame] | 3381 | /* |
| 3382 | * Note, halt-polling is considered successful so long as the vCPU was |
| 3383 | * never actually scheduled out, i.e. even if the wake event arrived |
 | 3384 | 	 * after the halt-polling loop itself, but before the full wait.
| 3385 | */ |
Sean Christopherson | 8df6a61 | 2021-10-08 19:11:58 -0700 | [diff] [blame] | 3386 | if (do_halt_poll) |
Sean Christopherson | 29e7289 | 2021-10-08 19:11:59 -0700 | [diff] [blame] | 3387 | update_halt_poll_stats(vcpu, start, poll_end, !waited); |
David Matlack | cb95312 | 2020-05-08 11:22:40 -0700 | [diff] [blame] | 3388 | |
Sean Christopherson | 6f39091 | 2021-10-08 19:11:56 -0700 | [diff] [blame] | 3389 | if (halt_poll_allowed) { |
Wanpeng Li | 44551b2 | 2019-09-29 09:06:56 +0800 | [diff] [blame] | 3390 | if (!vcpu_valid_wakeup(vcpu)) { |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3391 | shrink_halt_poll_ns(vcpu); |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 3392 | } else if (vcpu->kvm->max_halt_poll_ns) { |
Sean Christopherson | 91b99ea | 2021-10-08 19:12:06 -0700 | [diff] [blame] | 3393 | if (halt_ns <= vcpu->halt_poll_ns) |
Wanpeng Li | 44551b2 | 2019-09-29 09:06:56 +0800 | [diff] [blame] | 3394 | ; |
| 3395 | /* we had a long block, shrink polling */ |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 3396 | else if (vcpu->halt_poll_ns && |
Sean Christopherson | 91b99ea | 2021-10-08 19:12:06 -0700 | [diff] [blame] | 3397 | halt_ns > vcpu->kvm->max_halt_poll_ns) |
Wanpeng Li | 44551b2 | 2019-09-29 09:06:56 +0800 | [diff] [blame] | 3398 | shrink_halt_poll_ns(vcpu); |
| 3399 | /* we had a short halt and our poll time is too small */ |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 3400 | else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns && |
Sean Christopherson | 91b99ea | 2021-10-08 19:12:06 -0700 | [diff] [blame] | 3401 | halt_ns < vcpu->kvm->max_halt_poll_ns) |
Wanpeng Li | 44551b2 | 2019-09-29 09:06:56 +0800 | [diff] [blame] | 3402 | grow_halt_poll_ns(vcpu); |
| 3403 | } else { |
| 3404 | vcpu->halt_poll_ns = 0; |
| 3405 | } |
| 3406 | } |
Wanpeng Li | aca6ff2 | 2015-09-03 22:07:38 +0800 | [diff] [blame] | 3407 | |
Sean Christopherson | 91b99ea | 2021-10-08 19:12:06 -0700 | [diff] [blame] | 3408 | trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3409 | } |
Sean Christopherson | 91b99ea | 2021-10-08 19:12:06 -0700 | [diff] [blame] | 3410 | EXPORT_SYMBOL_GPL(kvm_vcpu_halt); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3411 | |
Radim Krčmář | 178f02f | 2017-04-26 22:32:26 +0200 | [diff] [blame] | 3412 | bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 3413 | { |
Sean Christopherson | d92a5d1 | 2021-10-08 19:12:12 -0700 | [diff] [blame] | 3414 | if (__kvm_vcpu_wake_up(vcpu)) { |
Wanpeng Li | d73eb57 | 2019-07-18 19:39:06 +0800 | [diff] [blame] | 3415 | WRITE_ONCE(vcpu->ready, true); |
Jing Zhang | 0193cc9 | 2021-06-18 22:27:03 +0000 | [diff] [blame] | 3416 | ++vcpu->stat.generic.halt_wakeup; |
Radim Krčmář | 178f02f | 2017-04-26 22:32:26 +0200 | [diff] [blame] | 3417 | return true; |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 3418 | } |
| 3419 | |
Radim Krčmář | 178f02f | 2017-04-26 22:32:26 +0200 | [diff] [blame] | 3420 | return false; |
Radim Krčmář | dd1a4cc | 2016-05-04 14:09:44 -0500 | [diff] [blame] | 3421 | } |
| 3422 | EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); |
| 3423 | |
Paolo Bonzini | 0266c89 | 2017-05-04 15:14:13 +0200 | [diff] [blame] | 3424 | #ifndef CONFIG_S390 |
Radim Krčmář | dd1a4cc | 2016-05-04 14:09:44 -0500 | [diff] [blame] | 3425 | /* |
| 3426 | * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. |
| 3427 | */ |
| 3428 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu) |
| 3429 | { |
Sean Christopherson | 85b6404 | 2021-08-27 11:25:09 +0200 | [diff] [blame] | 3430 | int me, cpu; |
Radim Krčmář | dd1a4cc | 2016-05-04 14:09:44 -0500 | [diff] [blame] | 3431 | |
Radim Krčmář | 178f02f | 2017-04-26 22:32:26 +0200 | [diff] [blame] | 3432 | if (kvm_vcpu_wake_up(vcpu)) |
| 3433 | return; |
| 3434 | |
Paolo Bonzini | aefdc2e | 2021-10-20 06:38:05 -0400 | [diff] [blame] | 3435 | me = get_cpu(); |
| 3436 | /* |
| 3437 | * The only state change done outside the vcpu mutex is IN_GUEST_MODE |
| 3438 | * to EXITING_GUEST_MODE. Therefore the moderately expensive "should |
| 3439 | * kick" check does not need atomic operations if kvm_vcpu_kick is used |
| 3440 | * within the vCPU thread itself. |
| 3441 | */ |
| 3442 | if (vcpu == __this_cpu_read(kvm_running_vcpu)) { |
| 3443 | if (vcpu->mode == IN_GUEST_MODE) |
| 3444 | WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE); |
| 3445 | goto out; |
| 3446 | } |
| 3447 | |
Sean Christopherson | 85b6404 | 2021-08-27 11:25:09 +0200 | [diff] [blame] | 3448 | /* |
| 3449 | * Note, the vCPU could get migrated to a different pCPU at any point |
| 3450 | * after kvm_arch_vcpu_should_kick(), which could result in sending an |
| 3451 | * IPI to the previous pCPU. But, that's ok because the purpose of the |
| 3452 | * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the |
| 3453 | * vCPU also requires it to leave IN_GUEST_MODE. |
| 3454 | */ |
Sean Christopherson | 85b6404 | 2021-08-27 11:25:09 +0200 | [diff] [blame] | 3455 | if (kvm_arch_vcpu_should_kick(vcpu)) { |
| 3456 | cpu = READ_ONCE(vcpu->cpu); |
| 3457 | if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 3458 | smp_send_reschedule(cpu); |
Sean Christopherson | 85b6404 | 2021-08-27 11:25:09 +0200 | [diff] [blame] | 3459 | } |
Paolo Bonzini | aefdc2e | 2021-10-20 06:38:05 -0400 | [diff] [blame] | 3460 | out: |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 3461 | put_cpu(); |
| 3462 | } |
Yang Zhang | a20ed54 | 2013-04-11 19:25:15 +0800 | [diff] [blame] | 3463 | EXPORT_SYMBOL_GPL(kvm_vcpu_kick); |
Paolo Bonzini | 0266c89 | 2017-05-04 15:14:13 +0200 | [diff] [blame] | 3464 | #endif /* !CONFIG_S390 */ |
Christoffer Dall | b6d3383 | 2012-03-08 16:44:24 -0500 | [diff] [blame] | 3465 | |
Dan Carpenter | fa93384 | 2014-05-23 13:20:42 +0300 | [diff] [blame] | 3466 | int kvm_vcpu_yield_to(struct kvm_vcpu *target) |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 3467 | { |
| 3468 | struct pid *pid; |
| 3469 | struct task_struct *task = NULL; |
Dan Carpenter | fa93384 | 2014-05-23 13:20:42 +0300 | [diff] [blame] | 3470 | int ret = 0; |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 3471 | |
| 3472 | rcu_read_lock(); |
| 3473 | pid = rcu_dereference(target->pid); |
| 3474 | if (pid) |
Sam Bobroff | 27fbe64b | 2014-09-19 09:40:41 +1000 | [diff] [blame] | 3475 | task = get_pid_task(pid, PIDTYPE_PID); |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 3476 | rcu_read_unlock(); |
| 3477 | if (!task) |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3478 | return ret; |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3479 | ret = yield_to(task, 1); |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 3480 | put_task_struct(task); |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3481 | |
| 3482 | return ret; |
Konstantin Weitz | 41628d3 | 2012-04-25 15:30:38 +0200 | [diff] [blame] | 3483 | } |
| 3484 | EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); |
| 3485 | |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3486 | /*
| 3487 |  * Helper that checks whether a VCPU is eligible for directed yield.
| 3488 |  * The most eligible candidate to yield to is decided by the following heuristics:
| 3489 |  *
| 3490 |  * (a) A VCPU which has not recently done a PLE exit or had its cpu relax
| 3491 |  * intercepted (a preempted lock holder), indicated by @in_spin_loop.
Fuad Tabba | 656012c | 2020-04-01 15:03:10 +0100 | [diff] [blame] | 3492 |  * Set at the beginning and cleared at the end of the interception/PLE handler.
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3493 |  *
| 3494 |  * (b) A VCPU which has done a PLE exit / had its cpu relax intercepted but
| 3495 |  * did not get a chance last time (it has most likely become eligible now,
| 3496 |  * since we probably yielded to the lock holder in the last iteration). This
| 3497 |  * is done by toggling @dy_eligible each time a VCPU is checked for eligibility.
| 3498 |  *
| 3499 |  * Yielding to a recently PLE-exited/cpu relax intercepted VCPU before yielding
| 3500 |  * to a preempted lock holder could result in selecting the wrong VCPU and
| 3501 |  * burning CPU. Giving priority to a potential lock holder increases lock
| 3502 |  * progress.
| 3503 |  *
| 3504 |  * Since the algorithm is based on heuristics, accessing another VCPU's data
| 3505 |  * without locking does no harm. It may result in trying to yield to the same
| 3506 |  * VCPU, failing, and continuing with the next VCPU, and so on.
| 3507 |  */
Stephen Hemminger | 7940876 | 2013-12-29 12:12:29 -0800 | [diff] [blame] | 3508 | static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3509 | { |
Scott Wood | 4a55dd7 | 2014-01-09 18:43:16 -0600 | [diff] [blame] | 3510 | #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3511 | bool eligible; |
| 3512 | |
| 3513 | eligible = !vcpu->spin_loop.in_spin_loop || |
Christian Borntraeger | 3465611 | 2014-09-04 21:13:31 +0200 | [diff] [blame] | 3514 | vcpu->spin_loop.dy_eligible; |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3515 | |
| 3516 | if (vcpu->spin_loop.in_spin_loop) |
| 3517 | kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); |
| 3518 | |
| 3519 | return eligible; |
Scott Wood | 4a55dd7 | 2014-01-09 18:43:16 -0600 | [diff] [blame] | 3520 | #else |
| 3521 | return true; |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3522 | #endif |
Scott Wood | 4a55dd7 | 2014-01-09 18:43:16 -0600 | [diff] [blame] | 3523 | } |
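/*
 * To make the alternation above concrete, a tiny standalone model (plain
 * userspace C, hypothetical names mirroring the spin_loop fields): a VCPU
 * that stays in a spin loop is skipped and picked on alternating checks,
 * which is what spreads directed yields across spinning VCPUs.
 */
#include <stdbool.h>
#include <stdio.h>

struct model { bool in_spin_loop; bool dy_eligible; };

static bool eligible(struct model *m)
{
	/* mirrors kvm_vcpu_eligible_for_directed_yield() */
	bool e = !m->in_spin_loop || m->dy_eligible;

	if (m->in_spin_loop)
		m->dy_eligible = !m->dy_eligible;
	return e;
}

int main(void)
{
	struct model m = { .in_spin_loop = true, .dy_eligible = false };

	for (int i = 0; i < 4; i++)
		printf("check %d: %s\n", i, eligible(&m) ? "yield" : "skip");
	/* prints: skip, yield, skip, yield */
	return 0;
}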
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3524 | |
Wanpeng Li | 17e433b | 2019-08-05 10:03:19 +0800 | [diff] [blame] | 3525 | /* |
| 3526 | * Unlike kvm_arch_vcpu_runnable, this function is called outside |
| 3527 | * a vcpu_load/vcpu_put pair. However, for most architectures |
| 3528 | * kvm_arch_vcpu_runnable does not require vcpu_load. |
| 3529 | */ |
| 3530 | bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) |
| 3531 | { |
| 3532 | return kvm_arch_vcpu_runnable(vcpu); |
| 3533 | } |
| 3534 | |
| 3535 | static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) |
| 3536 | { |
| 3537 | if (kvm_arch_dy_runnable(vcpu)) |
| 3538 | return true; |
| 3539 | |
| 3540 | #ifdef CONFIG_KVM_ASYNC_PF |
| 3541 | if (!list_empty_careful(&vcpu->async_pf.done)) |
| 3542 | return true; |
| 3543 | #endif |
| 3544 | |
| 3545 | return false; |
| 3546 | } |
| 3547 | |
Wanpeng Li | 52acd22 | 2021-04-16 11:08:10 +0800 | [diff] [blame] | 3548 | bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) |
| 3549 | { |
| 3550 | return false; |
| 3551 | } |
| 3552 | |
Longpeng(Mike) | 199b576 | 2017-08-08 12:05:32 +0800 | [diff] [blame] | 3553 | void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) |
Zhai, Edwin | d255f4f | 2009-10-09 18:03:20 +0800 | [diff] [blame] | 3554 | { |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3555 | struct kvm *kvm = me->kvm; |
| 3556 | struct kvm_vcpu *vcpu; |
| 3557 | int last_boosted_vcpu = me->kvm->last_boosted_vcpu; |
Marc Zyngier | 46808a4 | 2021-11-16 16:04:02 +0000 | [diff] [blame] | 3558 | unsigned long i; |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3559 | int yielded = 0; |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3560 | int try = 3; |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3561 | int pass; |
Zhai, Edwin | d255f4f | 2009-10-09 18:03:20 +0800 | [diff] [blame] | 3562 | |
Raghavendra K T | 4c08849 | 2012-07-18 19:07:46 +0530 | [diff] [blame] | 3563 | kvm_vcpu_set_in_spin_loop(me, true); |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3564 | /* |
| 3565 | * We boost the priority of a VCPU that is runnable but not |
| 3566 | * currently running, because it got preempted by something |
| 3567 |  * else and called schedule() in __vcpu_run(). Hopefully that
| 3568 | * VCPU is holding the lock that we need and will release it. |
| 3569 | * We approximate round-robin by starting at the last boosted VCPU. |
| 3570 | */ |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3571 | for (pass = 0; pass < 2 && !yielded && try; pass++) { |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3572 | kvm_for_each_vcpu(i, vcpu, kvm) { |
Rik van Riel | 5cfc2aa | 2012-06-19 16:51:04 -0400 | [diff] [blame] | 3573 | if (!pass && i <= last_boosted_vcpu) { |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3574 | i = last_boosted_vcpu; |
| 3575 | continue; |
| 3576 | } else if (pass && i > last_boosted_vcpu) |
| 3577 | break; |
Wanpeng Li | d73eb57 | 2019-07-18 19:39:06 +0800 | [diff] [blame] | 3578 | if (!READ_ONCE(vcpu->ready)) |
Raghavendra K T | 7bc7ae2 | 2013-03-04 23:32:27 +0530 | [diff] [blame] | 3579 | continue; |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3580 | if (vcpu == me) |
| 3581 | continue; |
Sean Christopherson | d92a5d1 | 2021-10-08 19:12:12 -0700 | [diff] [blame] | 3582 | if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu)) |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3583 | continue; |
Wanpeng Li | 046ddee | 2019-08-01 11:30:14 +0800 | [diff] [blame] | 3584 | if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && |
Wanpeng Li | 52acd22 | 2021-04-16 11:08:10 +0800 | [diff] [blame] | 3585 | !kvm_arch_dy_has_pending_interrupt(vcpu) && |
| 3586 | !kvm_arch_vcpu_in_kernel(vcpu)) |
Longpeng(Mike) | 199b576 | 2017-08-08 12:05:32 +0800 | [diff] [blame] | 3587 | continue; |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3588 | if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) |
| 3589 | continue; |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3590 | |
| 3591 | yielded = kvm_vcpu_yield_to(vcpu); |
| 3592 | if (yielded > 0) { |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3593 | kvm->last_boosted_vcpu = i; |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3594 | break; |
Raghavendra K T | c45c528 | 2013-01-22 13:09:24 +0530 | [diff] [blame] | 3595 | } else if (yielded < 0) { |
| 3596 | try--; |
| 3597 | if (!try) |
| 3598 | break; |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3599 | } |
Rik van Riel | 217ece6 | 2011-02-01 09:53:28 -0500 | [diff] [blame] | 3600 | } |
| 3601 | } |
Raghavendra K T | 4c08849 | 2012-07-18 19:07:46 +0530 | [diff] [blame] | 3602 | kvm_vcpu_set_in_spin_loop(me, false); |
Raghavendra K T | 06e48c5 | 2012-07-19 15:17:52 +0530 | [diff] [blame] | 3603 | |
| 3604 | 	/* Ensure the vcpu is not eligible during the next spin loop */
| 3605 | kvm_vcpu_set_dy_eligible(me, false); |
Zhai, Edwin | d255f4f | 2009-10-09 18:03:20 +0800 | [diff] [blame] | 3606 | } |
| 3607 | EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); |
| 3608 | |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3609 | static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) |
| 3610 | { |
David Woodhouse | dc70ec2 | 2021-11-21 12:54:40 +0000 | [diff] [blame] | 3611 | #ifdef CONFIG_HAVE_KVM_DIRTY_RING |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3612 | return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && |
| 3613 | (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + |
| 3614 | kvm->dirty_ring_size / PAGE_SIZE); |
| 3615 | #else |
| 3616 | return false; |
| 3617 | #endif |
| 3618 | } |
| 3619 | |
Souptick Joarder | 1499fa8 | 2018-04-19 00:49:58 +0530 | [diff] [blame] | 3620 | static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3621 | { |
Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 3622 | struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3623 | struct page *page; |
| 3624 | |
npiggin@suse.de | e4a533a | 2007-12-05 18:15:52 +1100 | [diff] [blame] | 3625 | if (vmf->pgoff == 0) |
Avi Kivity | 039576c | 2007-03-20 12:46:50 +0200 | [diff] [blame] | 3626 | page = virt_to_page(vcpu->run); |
Avi Kivity | 0956676 | 2008-01-23 18:14:23 +0200 | [diff] [blame] | 3627 | #ifdef CONFIG_X86 |
npiggin@suse.de | e4a533a | 2007-12-05 18:15:52 +1100 | [diff] [blame] | 3628 | else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 3629 | page = virt_to_page(vcpu->arch.pio_data); |
Avi Kivity | 0956676 | 2008-01-23 18:14:23 +0200 | [diff] [blame] | 3630 | #endif |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 3631 | #ifdef CONFIG_KVM_MMIO |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 3632 | else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) |
| 3633 | page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); |
| 3634 | #endif |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3635 | else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) |
| 3636 | page = kvm_dirty_ring_get_page( |
| 3637 | &vcpu->dirty_ring, |
| 3638 | vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET); |
Avi Kivity | 039576c | 2007-03-20 12:46:50 +0200 | [diff] [blame] | 3639 | else |
Carsten Otte | 5b1c149 | 2012-01-04 10:25:23 +0100 | [diff] [blame] | 3640 | return kvm_arch_vcpu_fault(vcpu, vmf); |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3641 | get_page(page); |
npiggin@suse.de | e4a533a | 2007-12-05 18:15:52 +1100 | [diff] [blame] | 3642 | vmf->page = page; |
| 3643 | return 0; |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3644 | } |
| 3645 | |
Alexey Dobriyan | f0f37e2f | 2009-09-27 22:29:37 +0400 | [diff] [blame] | 3646 | static const struct vm_operations_struct kvm_vcpu_vm_ops = { |
npiggin@suse.de | e4a533a | 2007-12-05 18:15:52 +1100 | [diff] [blame] | 3647 | .fault = kvm_vcpu_fault, |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3648 | }; |
| 3649 | |
| 3650 | static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) |
| 3651 | { |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3652 | struct kvm_vcpu *vcpu = file->private_data; |
Yang Li | 11476d2 | 2021-09-29 15:28:46 +0800 | [diff] [blame] | 3653 | unsigned long pages = vma_pages(vma); |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3654 | |
| 3655 | if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || |
| 3656 | kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && |
| 3657 | ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED))) |
| 3658 | return -EINVAL; |
| 3659 | |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3660 | vma->vm_ops = &kvm_vcpu_vm_ops; |
| 3661 | return 0; |
| 3662 | } |
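/*
 * For context, a hedged userspace-side sketch (a separate program, not
 * kernel code) of how the fault handler above is consumed: page 0 of a
 * vCPU fd mapping is the kvm_run structure, and KVM_GET_VCPU_MMAP_SIZE,
 * issued on the /dev/kvm fd, reports how much may be mapped.
 */
#include <linux/kvm.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static struct kvm_run *map_run(int kvm_fd, int vcpu_fd)
{
	long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	void *p;

	if (sz < 0)
		return NULL;
	/* MAP_SHARED and no PROT_EXEC; offset 0 is the vcpu->run page
	 * served by kvm_vcpu_fault() above. */
	p = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu_fd, 0);
	return p == MAP_FAILED ? NULL : p;
}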
| 3663 | |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3664 | static int kvm_vcpu_release(struct inode *inode, struct file *filp) |
| 3665 | { |
| 3666 | struct kvm_vcpu *vcpu = filp->private_data; |
| 3667 | |
Al Viro | 66c0b39 | 2008-04-19 20:33:56 +0100 | [diff] [blame] | 3668 | kvm_put_kvm(vcpu->kvm); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3669 | return 0; |
| 3670 | } |
| 3671 | |
Christian Borntraeger | 3d3aab1 | 2008-12-02 11:17:32 +0100 | [diff] [blame] | 3672 | static struct file_operations kvm_vcpu_fops = { |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3673 | .release = kvm_vcpu_release, |
| 3674 | .unlocked_ioctl = kvm_vcpu_ioctl, |
Avi Kivity | 9a2bb7f | 2007-02-22 12:58:31 +0200 | [diff] [blame] | 3675 | .mmap = kvm_vcpu_mmap, |
Arnd Bergmann | 6038f37 | 2010-08-15 18:52:59 +0200 | [diff] [blame] | 3676 | .llseek = noop_llseek, |
Marc Zyngier | 7ddfd3e | 2018-06-17 10:16:21 +0100 | [diff] [blame] | 3677 | KVM_COMPAT(kvm_vcpu_compat_ioctl), |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3678 | }; |
| 3679 | |
| 3680 | /* |
| 3681 |  * Allocates a file descriptor (backed by an anonymous inode) for the vcpu.
| 3682 | */ |
| 3683 | static int create_vcpu_fd(struct kvm_vcpu *vcpu) |
| 3684 | { |
Masatake YAMATO | e46b469 | 2018-01-20 04:04:22 +0900 | [diff] [blame] | 3685 | char name[8 + 1 + ITOA_MAX_LEN + 1]; |
| 3686 | |
| 3687 | snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); |
| 3688 | return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3689 | } |
| 3690 | |
Greg KH | 3e7093d | 2019-07-31 20:56:20 +0200 | [diff] [blame] | 3691 | static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3692 | { |
Paolo Bonzini | 741cbba | 2019-08-03 08:14:25 +0200 | [diff] [blame] | 3693 | #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS |
Paolo Bonzini | d56f513 | 2020-06-04 15:16:52 +0200 | [diff] [blame] | 3694 | struct dentry *debugfs_dentry; |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3695 | char dir_name[ITOA_MAX_LEN * 2]; |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3696 | |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3697 | if (!debugfs_initialized()) |
Greg KH | 3e7093d | 2019-07-31 20:56:20 +0200 | [diff] [blame] | 3698 | return; |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3699 | |
| 3700 | snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); |
Paolo Bonzini | d56f513 | 2020-06-04 15:16:52 +0200 | [diff] [blame] | 3701 | debugfs_dentry = debugfs_create_dir(dir_name, |
| 3702 | vcpu->kvm->debugfs_dentry); |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3703 | |
Paolo Bonzini | d56f513 | 2020-06-04 15:16:52 +0200 | [diff] [blame] | 3704 | kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); |
Paolo Bonzini | 741cbba | 2019-08-03 08:14:25 +0200 | [diff] [blame] | 3705 | #endif |
Luiz Capitulino | 45b5939 | 2016-09-16 10:27:35 -0400 | [diff] [blame] | 3706 | } |
| 3707 | |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3708 | /* |
| 3709 | * Creates some virtual cpus. Good luck creating more than one. |
| 3710 | */ |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3711 | static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3712 | { |
| 3713 | int r; |
David Hildenbrand | e09fefd | 2015-11-05 09:03:50 +0100 | [diff] [blame] | 3714 | struct kvm_vcpu *vcpu; |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3715 | struct page *page; |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3716 | |
Juergen Gross | a1c42dd | 2021-09-13 15:57:44 +0200 | [diff] [blame] | 3717 | if (id >= KVM_MAX_VCPU_IDS) |
Andy Honig | 338c7db | 2013-11-18 16:09:22 -0800 | [diff] [blame] | 3718 | return -EINVAL; |
| 3719 | |
Paolo Bonzini | 6c7caeb | 2016-06-13 14:48:25 +0200 | [diff] [blame] | 3720 | mutex_lock(&kvm->lock); |
| 3721 | if (kvm->created_vcpus == KVM_MAX_VCPUS) { |
| 3722 | mutex_unlock(&kvm->lock); |
| 3723 | return -EINVAL; |
| 3724 | } |
| 3725 | |
| 3726 | kvm->created_vcpus++; |
| 3727 | mutex_unlock(&kvm->lock); |
| 3728 | |
Sean Christopherson | 897cc38 | 2019-12-18 13:55:09 -0800 | [diff] [blame] | 3729 | r = kvm_arch_vcpu_precreate(kvm, id); |
| 3730 | if (r) |
| 3731 | goto vcpu_decrement; |
| 3732 | |
Sean Christopherson | 85f4793 | 2021-04-06 12:07:40 -0700 | [diff] [blame] | 3733 | vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3734 | if (!vcpu) { |
| 3735 | r = -ENOMEM; |
Paolo Bonzini | 6c7caeb | 2016-06-13 14:48:25 +0200 | [diff] [blame] | 3736 | goto vcpu_decrement; |
| 3737 | } |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3738 | |
Peter Xu | fcd97ad | 2020-01-09 09:57:12 -0500 | [diff] [blame] | 3739 | BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE); |
Shakeel Butt | 93bb59c | 2020-12-18 14:01:38 -0800 | [diff] [blame] | 3740 | page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3741 | if (!page) { |
| 3742 | r = -ENOMEM; |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3743 | goto vcpu_free; |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3744 | } |
| 3745 | vcpu->run = page_address(page); |
| 3746 | |
| 3747 | kvm_vcpu_init(vcpu, kvm, id); |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3748 | |
| 3749 | r = kvm_arch_vcpu_create(vcpu); |
| 3750 | if (r) |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3751 | goto vcpu_free_run_page; |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3752 | |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3753 | if (kvm->dirty_ring_size) { |
| 3754 | r = kvm_dirty_ring_alloc(&vcpu->dirty_ring, |
| 3755 | id, kvm->dirty_ring_size); |
| 3756 | if (r) |
| 3757 | goto arch_vcpu_destroy; |
| 3758 | } |
| 3759 | |
Shaohua Li | 11ec280 | 2007-07-23 14:51:37 +0800 | [diff] [blame] | 3760 | mutex_lock(&kvm->lock); |
David Hildenbrand | e09fefd | 2015-11-05 09:03:50 +0100 | [diff] [blame] | 3761 | if (kvm_get_vcpu_by_id(kvm, id)) { |
| 3762 | r = -EEXIST; |
| 3763 | goto unlock_vcpu_destroy; |
| 3764 | } |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3765 | |
Radim Krčmář | 8750e72 | 2019-11-07 07:53:42 -0500 | [diff] [blame] | 3766 | vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); |
Marc Zyngier | c5b0775 | 2021-11-16 16:04:01 +0000 | [diff] [blame] | 3767 | r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT); |
| 3768 | BUG_ON(r == -EBUSY); |
| 3769 | if (r) |
| 3770 | goto unlock_vcpu_destroy; |
Rusty Russell | fb3f0f5 | 2007-07-27 17:16:56 +1000 | [diff] [blame] | 3771 | |
Jing Zhang | ce55c04 | 2021-06-18 22:27:06 +0000 | [diff] [blame] | 3772 | /* Fill the stats id string for the vcpu */ |
| 3773 | snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d", |
| 3774 | task_pid_nr(current), id); |
| 3775 | |
Rusty Russell | fb3f0f5 | 2007-07-27 17:16:56 +1000 | [diff] [blame] | 3776 | /* Now it's all set up, let userspace reach it */ |
Al Viro | 66c0b39 | 2008-04-19 20:33:56 +0100 | [diff] [blame] | 3777 | kvm_get_kvm(kvm); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3778 | r = create_vcpu_fd(vcpu); |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3779 | if (r < 0) { |
Marc Zyngier | c5b0775 | 2021-11-16 16:04:01 +0000 | [diff] [blame] | 3780 | xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx); |
Sean Christopherson | 149487b | 2019-10-21 15:58:42 -0700 | [diff] [blame] | 3781 | kvm_put_kvm_no_destroy(kvm); |
Jan Kiszka | d780592 | 2011-05-23 10:33:05 +0200 | [diff] [blame] | 3782 | goto unlock_vcpu_destroy; |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3783 | } |
| 3784 | |
Paolo Bonzini | dd48924 | 2015-07-29 11:32:20 +0200 | [diff] [blame] | 3785 | /* |
Marc Zyngier | c5b0775 | 2021-11-16 16:04:01 +0000 | [diff] [blame] | 3786 |  * Pairs with smp_rmb() in kvm_get_vcpu(). Store the vcpu
| 3787 |  * pointer before incrementing kvm->online_vcpus.
Paolo Bonzini | dd48924 | 2015-07-29 11:32:20 +0200 | [diff] [blame] | 3788 | */ |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3789 | smp_wmb(); |
| 3790 | atomic_inc(&kvm->online_vcpus); |
| 3791 | |
Gleb Natapov | 73880c8 | 2009-06-09 15:56:28 +0300 | [diff] [blame] | 3792 | mutex_unlock(&kvm->lock); |
Marcelo Tosatti | 42897d8 | 2012-11-27 23:29:02 -0200 | [diff] [blame] | 3793 | kvm_arch_vcpu_postcreate(vcpu); |
Paolo Bonzini | 63d0434 | 2020-04-01 00:42:22 +0200 | [diff] [blame] | 3794 | kvm_create_vcpu_debugfs(vcpu); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3795 | return r; |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3796 | |
Jan Kiszka | d780592 | 2011-05-23 10:33:05 +0200 | [diff] [blame] | 3797 | unlock_vcpu_destroy: |
Glauber Costa | 7d8fece | 2008-09-17 23:16:59 -0300 | [diff] [blame] | 3798 | mutex_unlock(&kvm->lock); |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 3799 | kvm_dirty_ring_free(&vcpu->dirty_ring); |
| 3800 | arch_vcpu_destroy: |
Hollis Blanchard | d40ccc6 | 2007-11-19 14:04:43 -0600 | [diff] [blame] | 3801 | kvm_arch_vcpu_destroy(vcpu); |
Sean Christopherson | 8bd826d | 2019-12-18 13:55:30 -0800 | [diff] [blame] | 3802 | vcpu_free_run_page: |
| 3803 | free_page((unsigned long)vcpu->run); |
Sean Christopherson | e529ef6 | 2019-12-18 13:55:15 -0800 | [diff] [blame] | 3804 | vcpu_free: |
| 3805 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
Paolo Bonzini | 6c7caeb | 2016-06-13 14:48:25 +0200 | [diff] [blame] | 3806 | vcpu_decrement: |
| 3807 | mutex_lock(&kvm->lock); |
| 3808 | kvm->created_vcpus--; |
| 3809 | mutex_unlock(&kvm->lock); |
Avi Kivity | c5ea766 | 2007-02-20 18:41:05 +0200 | [diff] [blame] | 3810 | return r; |
| 3811 | } |
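/*
 * The userspace call that reaches kvm_vm_ioctl_create_vcpu() above, as a
 * minimal sketch: KVM_CREATE_VCPU on a VM fd returns the new vCPU fd, or
 * -1 with errno set (e.g. EINVAL once KVM_MAX_VCPUS is reached).
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int create_vcpu(int vm_fd, unsigned int id)
{
	return ioctl(vm_fd, KVM_CREATE_VCPU, id);
}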
| 3812 | |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 3813 | static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) |
| 3814 | { |
| 3815 | if (sigset) { |
| 3816 | sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
| 3817 | vcpu->sigset_active = 1; |
| 3818 | vcpu->sigset = *sigset; |
| 3819 | } else |
| 3820 | vcpu->sigset_active = 0; |
| 3821 | return 0; |
| 3822 | } |
| 3823 | |
Jing Zhang | ce55c04 | 2021-06-18 22:27:06 +0000 | [diff] [blame] | 3824 | static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer, |
| 3825 | size_t size, loff_t *offset) |
| 3826 | { |
| 3827 | struct kvm_vcpu *vcpu = file->private_data; |
| 3828 | |
| 3829 | return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header, |
| 3830 | &kvm_vcpu_stats_desc[0], &vcpu->stat, |
| 3831 | sizeof(vcpu->stat), user_buffer, size, offset); |
| 3832 | } |
| 3833 | |
| 3834 | static const struct file_operations kvm_vcpu_stats_fops = { |
| 3835 | .read = kvm_vcpu_stats_read, |
| 3836 | .llseek = noop_llseek, |
| 3837 | }; |
| 3838 | |
| 3839 | static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu) |
| 3840 | { |
| 3841 | int fd; |
| 3842 | struct file *file; |
| 3843 | char name[15 + ITOA_MAX_LEN + 1]; |
| 3844 | |
| 3845 | snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id); |
| 3846 | |
| 3847 | fd = get_unused_fd_flags(O_CLOEXEC); |
| 3848 | if (fd < 0) |
| 3849 | return fd; |
| 3850 | |
| 3851 | file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY); |
| 3852 | if (IS_ERR(file)) { |
| 3853 | put_unused_fd(fd); |
| 3854 | return PTR_ERR(file); |
| 3855 | } |
| 3856 | file->f_mode |= FMODE_PREAD; |
| 3857 | fd_install(fd, file); |
| 3858 | |
| 3859 | return fd; |
| 3860 | } |
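/*
 * A hedged userspace-side sketch of consuming the stats fd created above:
 * the file begins with a struct kvm_stats_header (from <linux/kvm.h>) that
 * locates the descriptors and the data block, and pread() works because
 * the fd was opened with FMODE_PREAD.
 */
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static void dump_stats_header(int vcpu_fd)
{
	struct kvm_stats_header hdr;
	int fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);

	if (fd < 0)
		return;
	if (pread(fd, &hdr, sizeof(hdr), 0) == (ssize_t)sizeof(hdr))
		printf("%u stat descriptors, data at offset %u\n",
		       hdr.num_desc, hdr.data_offset);
	close(fd);
}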
| 3861 | |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3862 | static long kvm_vcpu_ioctl(struct file *filp, |
| 3863 | unsigned int ioctl, unsigned long arg) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3864 | { |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 3865 | struct kvm_vcpu *vcpu = filp->private_data; |
Al Viro | 2f366987 | 2007-02-09 16:38:35 +0000 | [diff] [blame] | 3866 | void __user *argp = (void __user *)arg; |
Carsten Otte | 313a3dc | 2007-10-11 19:16:52 +0200 | [diff] [blame] | 3867 | int r; |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3868 | struct kvm_fpu *fpu = NULL; |
| 3869 | struct kvm_sregs *kvm_sregs = NULL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3870 | |
Paolo Bonzini | f4d3165 | 2021-11-11 10:13:38 -0500 | [diff] [blame] | 3871 | if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) |
Avi Kivity | 6d4e4c4 | 2007-11-21 16:41:05 +0200 | [diff] [blame] | 3872 | return -EIO; |
Avi Kivity | 2122ff5 | 2010-05-13 11:25:04 +0300 | [diff] [blame] | 3873 | |
David Matlack | 2ea75be | 2014-09-19 16:03:25 -0700 | [diff] [blame] | 3874 | if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) |
| 3875 | return -EINVAL; |
| 3876 | |
Avi Kivity | 2122ff5 | 2010-05-13 11:25:04 +0300 | [diff] [blame] | 3877 | /* |
Paolo Bonzini | 5cb0944 | 2017-12-12 17:41:34 +0100 | [diff] [blame] | 3878 | * Some architectures have vcpu ioctls that are asynchronous to vcpu |
| 3879 | * execution; mutex_lock() would break them. |
Avi Kivity | 2122ff5 | 2010-05-13 11:25:04 +0300 | [diff] [blame] | 3880 | */ |
Paolo Bonzini | 5cb0944 | 2017-12-12 17:41:34 +0100 | [diff] [blame] | 3881 | r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); |
| 3882 | if (r != -ENOIOCTLCMD) |
Michael S. Tsirkin | 9fc7744 | 2012-09-16 11:50:30 +0300 | [diff] [blame] | 3883 | return r; |
Avi Kivity | 2122ff5 | 2010-05-13 11:25:04 +0300 | [diff] [blame] | 3884 | |
Christoffer Dall | ec7660c | 2017-12-04 21:35:23 +0100 | [diff] [blame] | 3885 | if (mutex_lock_killable(&vcpu->mutex)) |
| 3886 | return -EINTR; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3887 | switch (ioctl) { |
Christian Borntraeger | 0e4524a | 2017-07-06 14:44:28 +0200 | [diff] [blame] | 3888 | case KVM_RUN: { |
| 3889 | struct pid *oldpid; |
Avi Kivity | f0fe510 | 2007-03-07 13:11:17 +0200 | [diff] [blame] | 3890 | r = -EINVAL; |
| 3891 | if (arg) |
| 3892 | goto out; |
Christian Borntraeger | 0e4524a | 2017-07-06 14:44:28 +0200 | [diff] [blame] | 3893 | oldpid = rcu_access_pointer(vcpu->pid); |
Eric W. Biederman | 71dbc8a | 2017-07-16 21:39:32 -0500 | [diff] [blame] | 3894 | if (unlikely(oldpid != task_pid(current))) { |
Christian Borntraeger | 7a72f7a | 2014-08-05 16:44:14 +0200 | [diff] [blame] | 3895 | /* The thread running this VCPU changed. */ |
Christoffer Dall | bd2a639 | 2018-02-23 17:23:57 +0100 | [diff] [blame] | 3896 | struct pid *newpid; |
Xiubo Li | f95ef0cd | 2015-02-26 14:58:23 +0800 | [diff] [blame] | 3897 | |
Christoffer Dall | bd2a639 | 2018-02-23 17:23:57 +0100 | [diff] [blame] | 3898 | r = kvm_arch_vcpu_run_pid_change(vcpu); |
| 3899 | if (r) |
| 3900 | break; |
| 3901 | |
| 3902 | newpid = get_task_pid(current, PIDTYPE_PID); |
Christian Borntraeger | 7a72f7a | 2014-08-05 16:44:14 +0200 | [diff] [blame] | 3903 | rcu_assign_pointer(vcpu->pid, newpid); |
| 3904 | if (oldpid) |
| 3905 | synchronize_rcu(); |
| 3906 | put_pid(oldpid); |
| 3907 | } |
Tianjia Zhang | 1b94f6f | 2020-04-16 13:10:57 +0800 | [diff] [blame] | 3908 | r = kvm_arch_vcpu_ioctl_run(vcpu); |
Gleb Natapov | 64be500 | 2010-10-24 16:49:08 +0200 | [diff] [blame] | 3909 | trace_kvm_userspace_exit(vcpu->run->exit_reason, r); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3910 | break; |
Christian Borntraeger | 0e4524a | 2017-07-06 14:44:28 +0200 | [diff] [blame] | 3911 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3912 | case KVM_GET_REGS: { |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3913 | struct kvm_regs *kvm_regs; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3914 | |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3915 | r = -ENOMEM; |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 3916 | kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT); |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3917 | if (!kvm_regs) |
| 3918 | goto out; |
| 3919 | r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3920 | if (r) |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3921 | goto out_free1; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3922 | r = -EFAULT; |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3923 | if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) |
| 3924 | goto out_free1; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3925 | r = 0; |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3926 | out_free1: |
| 3927 | kfree(kvm_regs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3928 | break; |
| 3929 | } |
| 3930 | case KVM_SET_REGS: { |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3931 | struct kvm_regs *kvm_regs; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3932 | |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3933 | kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); |
| 3934 | if (IS_ERR(kvm_regs)) { |
| 3935 | r = PTR_ERR(kvm_regs); |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3936 | goto out; |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3937 | } |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3938 | r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); |
Xiantao Zhang | 3e4bb3a | 2008-02-25 18:52:20 +0800 | [diff] [blame] | 3939 | kfree(kvm_regs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3940 | break; |
| 3941 | } |
| 3942 | case KVM_GET_SREGS: { |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 3943 | kvm_sregs = kzalloc(sizeof(struct kvm_sregs), |
| 3944 | GFP_KERNEL_ACCOUNT); |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3945 | r = -ENOMEM; |
| 3946 | if (!kvm_sregs) |
| 3947 | goto out; |
| 3948 | r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3949 | if (r) |
| 3950 | goto out; |
| 3951 | r = -EFAULT; |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3952 | if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3953 | goto out; |
| 3954 | r = 0; |
| 3955 | break; |
| 3956 | } |
| 3957 | case KVM_SET_SREGS: { |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3958 | kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); |
| 3959 | if (IS_ERR(kvm_sregs)) { |
| 3960 | r = PTR_ERR(kvm_sregs); |
Guo Chao | 1859541 | 2012-11-02 18:33:21 +0800 | [diff] [blame] | 3961 | kvm_sregs = NULL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3962 | goto out; |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 3963 | } |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 3964 | r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3965 | break; |
| 3966 | } |
Marcelo Tosatti | 62d9f0d | 2008-04-11 13:24:45 -0300 | [diff] [blame] | 3967 | case KVM_GET_MP_STATE: { |
| 3968 | struct kvm_mp_state mp_state; |
| 3969 | |
| 3970 | r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); |
| 3971 | if (r) |
| 3972 | goto out; |
| 3973 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3974 | if (copy_to_user(argp, &mp_state, sizeof(mp_state))) |
Marcelo Tosatti | 62d9f0d | 2008-04-11 13:24:45 -0300 | [diff] [blame] | 3975 | goto out; |
| 3976 | r = 0; |
| 3977 | break; |
| 3978 | } |
| 3979 | case KVM_SET_MP_STATE: { |
| 3980 | struct kvm_mp_state mp_state; |
| 3981 | |
| 3982 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3983 | if (copy_from_user(&mp_state, argp, sizeof(mp_state))) |
Marcelo Tosatti | 62d9f0d | 2008-04-11 13:24:45 -0300 | [diff] [blame] | 3984 | goto out; |
| 3985 | r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); |
Marcelo Tosatti | 62d9f0d | 2008-04-11 13:24:45 -0300 | [diff] [blame] | 3986 | break; |
| 3987 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3988 | case KVM_TRANSLATE: { |
| 3989 | struct kvm_translation tr; |
| 3990 | |
| 3991 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3992 | if (copy_from_user(&tr, argp, sizeof(tr))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3993 | goto out; |
Zhang Xiantao | 8b00679 | 2007-11-16 13:05:55 +0800 | [diff] [blame] | 3994 | r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3995 | if (r) |
| 3996 | goto out; |
| 3997 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 3998 | if (copy_to_user(argp, &tr, sizeof(tr))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3999 | goto out; |
| 4000 | r = 0; |
| 4001 | break; |
| 4002 | } |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 4003 | case KVM_SET_GUEST_DEBUG: { |
| 4004 | struct kvm_guest_debug dbg; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4005 | |
| 4006 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4007 | if (copy_from_user(&dbg, argp, sizeof(dbg))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4008 | goto out; |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 4009 | r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4010 | break; |
| 4011 | } |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 4012 | case KVM_SET_SIGNAL_MASK: { |
| 4013 | struct kvm_signal_mask __user *sigmask_arg = argp; |
| 4014 | struct kvm_signal_mask kvm_sigmask; |
| 4015 | sigset_t sigset, *p; |
| 4016 | |
| 4017 | p = NULL; |
| 4018 | if (argp) { |
| 4019 | r = -EFAULT; |
| 4020 | if (copy_from_user(&kvm_sigmask, argp, |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4021 | sizeof(kvm_sigmask))) |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 4022 | goto out; |
| 4023 | r = -EINVAL; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4024 | if (kvm_sigmask.len != sizeof(sigset)) |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 4025 | goto out; |
| 4026 | r = -EFAULT; |
| 4027 | if (copy_from_user(&sigset, sigmask_arg->sigset, |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4028 | sizeof(sigset))) |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 4029 | goto out; |
| 4030 | p = &sigset; |
| 4031 | } |
Andi Kleen | 376d41f | 2010-06-10 13:10:47 +0200 | [diff] [blame] | 4032 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); |
Avi Kivity | 1961d27 | 2007-03-05 19:46:05 +0200 | [diff] [blame] | 4033 | break; |
| 4034 | } |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 4035 | case KVM_GET_FPU: { |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 4036 | fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT); |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 4037 | r = -ENOMEM; |
| 4038 | if (!fpu) |
| 4039 | goto out; |
| 4040 | r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 4041 | if (r) |
| 4042 | goto out; |
| 4043 | r = -EFAULT; |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 4044 | if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 4045 | goto out; |
| 4046 | r = 0; |
| 4047 | break; |
| 4048 | } |
| 4049 | case KVM_SET_FPU: { |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 4050 | fpu = memdup_user(argp, sizeof(*fpu)); |
| 4051 | if (IS_ERR(fpu)) { |
| 4052 | r = PTR_ERR(fpu); |
Guo Chao | 1859541 | 2012-11-02 18:33:21 +0800 | [diff] [blame] | 4053 | fpu = NULL; |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 4054 | goto out; |
Sasha Levin | ff5c2c0 | 2011-12-04 19:36:29 +0200 | [diff] [blame] | 4055 | } |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 4056 | r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); |
Avi Kivity | b883673 | 2007-04-01 16:34:31 +0300 | [diff] [blame] | 4057 | break; |
| 4058 | } |
Jing Zhang | ce55c04 | 2021-06-18 22:27:06 +0000 | [diff] [blame] | 4059 | case KVM_GET_STATS_FD: { |
| 4060 | r = kvm_vcpu_ioctl_get_stats_fd(vcpu); |
| 4061 | break; |
| 4062 | } |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 4063 | default: |
Carsten Otte | 313a3dc | 2007-10-11 19:16:52 +0200 | [diff] [blame] | 4064 | r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 4065 | } |
| 4066 | out: |
Christoffer Dall | ec7660c | 2017-12-04 21:35:23 +0100 | [diff] [blame] | 4067 | mutex_unlock(&vcpu->mutex); |
Dave Hansen | fa3795a | 2008-08-11 10:01:46 -0700 | [diff] [blame] | 4068 | kfree(fpu); |
| 4069 | kfree(kvm_sregs); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 4070 | return r; |
| 4071 | } |
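/*
 * The KVM_RUN case above is the heart of this ioctl; a minimal userspace
 * run loop as a sketch, where 'run' is the mmap()ed kvm_run page and every
 * exit reason other than HLT is left to the reader.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int run_until_halt(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;	/* e.g. EINTR from a pending signal */
		if (run->exit_reason == KVM_EXIT_HLT)
			return 0;	/* guest executed HLT */
		/* KVM_EXIT_IO, KVM_EXIT_MMIO, ... would be decoded here */
	}
}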
| 4072 | |
Christian Borntraeger | de8e5d7 | 2015-02-03 09:35:15 +0100 | [diff] [blame] | 4073 | #ifdef CONFIG_KVM_COMPAT |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 4074 | static long kvm_vcpu_compat_ioctl(struct file *filp, |
| 4075 | unsigned int ioctl, unsigned long arg) |
| 4076 | { |
| 4077 | struct kvm_vcpu *vcpu = filp->private_data; |
| 4078 | void __user *argp = compat_ptr(arg); |
| 4079 | int r; |
| 4080 | |
Paolo Bonzini | f4d3165 | 2021-11-11 10:13:38 -0500 | [diff] [blame] | 4081 | if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 4082 | return -EIO; |
| 4083 | |
| 4084 | switch (ioctl) { |
| 4085 | case KVM_SET_SIGNAL_MASK: { |
| 4086 | struct kvm_signal_mask __user *sigmask_arg = argp; |
| 4087 | struct kvm_signal_mask kvm_sigmask; |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 4088 | sigset_t sigset; |
| 4089 | |
| 4090 | if (argp) { |
| 4091 | r = -EFAULT; |
| 4092 | if (copy_from_user(&kvm_sigmask, argp, |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4093 | sizeof(kvm_sigmask))) |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 4094 | goto out; |
| 4095 | r = -EINVAL; |
Al Viro | 3968cf6 | 2017-09-03 21:45:17 -0400 | [diff] [blame] | 4096 | if (kvm_sigmask.len != sizeof(compat_sigset_t)) |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 4097 | goto out; |
| 4098 | r = -EFAULT; |
Paolo Bonzini | 1393b4a | 2020-07-02 05:39:31 -0400 | [diff] [blame] | 4099 | if (get_compat_sigset(&sigset, |
| 4100 | (compat_sigset_t __user *)sigmask_arg->sigset)) |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 4101 | goto out; |
Alan Cox | 760a9a3 | 2012-08-22 14:34:11 +0100 | [diff] [blame] | 4102 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); |
| 4103 | } else |
| 4104 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); |
Alexander Graf | 1dda606 | 2011-06-08 02:45:37 +0200 | [diff] [blame] | 4105 | break; |
| 4106 | } |
| 4107 | default: |
| 4108 | r = kvm_vcpu_ioctl(filp, ioctl, arg); |
| 4109 | } |
| 4110 | |
| 4111 | out: |
| 4112 | return r; |
| 4113 | } |
| 4114 | #endif |
| 4115 | |
Cédric Le Goater | a1cd3f0 | 2019-04-18 12:39:36 +0200 | [diff] [blame] | 4116 | static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) |
| 4117 | { |
| 4118 | struct kvm_device *dev = filp->private_data; |
| 4119 | |
| 4120 | if (dev->ops->mmap) |
| 4121 | return dev->ops->mmap(dev, vma); |
| 4122 | |
| 4123 | return -ENODEV; |
| 4124 | } |
| 4125 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4126 | static int kvm_device_ioctl_attr(struct kvm_device *dev, |
| 4127 | int (*accessor)(struct kvm_device *dev, |
| 4128 | struct kvm_device_attr *attr), |
| 4129 | unsigned long arg) |
| 4130 | { |
| 4131 | struct kvm_device_attr attr; |
| 4132 | |
| 4133 | if (!accessor) |
| 4134 | return -EPERM; |
| 4135 | |
| 4136 | if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) |
| 4137 | return -EFAULT; |
| 4138 | |
| 4139 | return accessor(dev, &attr); |
| 4140 | } |
| 4141 | |
| 4142 | static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, |
| 4143 | unsigned long arg) |
| 4144 | { |
| 4145 | struct kvm_device *dev = filp->private_data; |
| 4146 | |
Paolo Bonzini | f4d3165 | 2021-11-11 10:13:38 -0500 | [diff] [blame] | 4147 | if (dev->kvm->mm != current->mm || dev->kvm->vm_dead) |
Sean Christopherson | ddba918 | 2019-02-15 12:48:39 -0800 | [diff] [blame] | 4148 | return -EIO; |
| 4149 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4150 | switch (ioctl) { |
| 4151 | case KVM_SET_DEVICE_ATTR: |
| 4152 | return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); |
| 4153 | case KVM_GET_DEVICE_ATTR: |
| 4154 | return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); |
| 4155 | case KVM_HAS_DEVICE_ATTR: |
| 4156 | return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); |
| 4157 | default: |
| 4158 | if (dev->ops->ioctl) |
| 4159 | return dev->ops->ioctl(dev, ioctl, arg); |
| 4160 | |
| 4161 | return -ENOTTY; |
| 4162 | } |
| 4163 | } |
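/*
 * A hedged userspace-side sketch of driving the attr accessors above. The
 * group/attr numbers are device-specific; whatever a caller passes here is
 * a placeholder.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int set_device_attr(int dev_fd, uint32_t group, uint64_t attr,
			   void *val)
{
	struct kvm_device_attr da = {
		.group = group,
		.attr  = attr,
		.addr  = (uintptr_t)val,	/* userspace pointer */
	};

	if (ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &da))
		return -1;			/* attribute not supported */
	return ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &da);
}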
| 4164 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4165 | static int kvm_device_release(struct inode *inode, struct file *filp) |
| 4166 | { |
| 4167 | struct kvm_device *dev = filp->private_data; |
| 4168 | struct kvm *kvm = dev->kvm; |
| 4169 | |
Cédric Le Goater | 2bde9b3 | 2019-04-18 12:39:41 +0200 | [diff] [blame] | 4170 | if (dev->ops->release) { |
| 4171 | mutex_lock(&kvm->lock); |
| 4172 | list_del(&dev->vm_node); |
| 4173 | dev->ops->release(dev); |
| 4174 | mutex_unlock(&kvm->lock); |
| 4175 | } |
| 4176 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4177 | kvm_put_kvm(kvm); |
| 4178 | return 0; |
| 4179 | } |
| 4180 | |
| 4181 | static const struct file_operations kvm_device_fops = { |
| 4182 | .unlocked_ioctl = kvm_device_ioctl, |
| 4183 | .release = kvm_device_release, |
Marc Zyngier | 7ddfd3e | 2018-06-17 10:16:21 +0100 | [diff] [blame] | 4184 | KVM_COMPAT(kvm_device_ioctl), |
Cédric Le Goater | a1cd3f0 | 2019-04-18 12:39:36 +0200 | [diff] [blame] | 4185 | .mmap = kvm_device_mmap, |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4186 | }; |
| 4187 | |
| 4188 | struct kvm_device *kvm_device_from_filp(struct file *filp) |
| 4189 | { |
| 4190 | if (filp->f_op != &kvm_device_fops) |
| 4191 | return NULL; |
| 4192 | |
| 4193 | return filp->private_data; |
| 4194 | } |
| 4195 | |
Steven Price | 8538cb2 | 2019-10-21 16:28:19 +0100 | [diff] [blame] | 4196 | static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 4197 | #ifdef CONFIG_KVM_MPIC |
| 4198 | [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, |
| 4199 | [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, |
| 4200 | #endif |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 4201 | }; |
| 4202 | |
Steven Price | 8538cb2 | 2019-10-21 16:28:19 +0100 | [diff] [blame] | 4203 | int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type) |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 4204 | { |
| 4205 | if (type >= ARRAY_SIZE(kvm_device_ops_table)) |
| 4206 | return -ENOSPC; |
| 4207 | |
| 4208 | if (kvm_device_ops_table[type] != NULL) |
| 4209 | return -EEXIST; |
| 4210 | |
| 4211 | kvm_device_ops_table[type] = ops; |
| 4212 | return 0; |
| 4213 | } |
| 4214 | |
Wanpeng Li | 571ee1b | 2014-10-09 18:30:08 +0800 | [diff] [blame] | 4215 | void kvm_unregister_device_ops(u32 type) |
| 4216 | { |
| 4217 | if (kvm_device_ops_table[type] != NULL) |
| 4218 | kvm_device_ops_table[type] = NULL; |
| 4219 | } |
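/*
 * A minimal kernel-side sketch of what a backend registers through the
 * table above. All names here are hypothetical (real users are e.g. the
 * MPIC and the arch interrupt controllers); note that ops->destroy() owns
 * freeing the device itself, matching the error path in
 * kvm_ioctl_create_device() below.
 */
static int example_dev_create(struct kvm_device *dev, u32 type)
{
	return 0;		/* allocate per-device state here */
}

static void example_dev_destroy(struct kvm_device *dev)
{
	kfree(dev);		/* destroy() frees the device */
}

static const struct kvm_device_ops example_dev_ops = {
	.name    = "example",
	.create  = example_dev_create,
	.destroy = example_dev_destroy,
};
/* registered with: kvm_register_device_ops(&example_dev_ops, type); */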
| 4220 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4221 | static int kvm_ioctl_create_device(struct kvm *kvm, |
| 4222 | struct kvm_create_device *cd) |
| 4223 | { |
Steven Price | 8538cb2 | 2019-10-21 16:28:19 +0100 | [diff] [blame] | 4224 | const struct kvm_device_ops *ops = NULL; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4225 | struct kvm_device *dev; |
| 4226 | bool test = cd->flags & KVM_CREATE_DEVICE_TEST; |
Paolo Bonzini | 1d487e9 | 2019-04-11 11:16:47 +0200 | [diff] [blame] | 4227 | int type; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4228 | int ret; |
| 4229 | |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 4230 | if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4231 | return -ENODEV; |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 4232 | |
Paolo Bonzini | 1d487e9 | 2019-04-11 11:16:47 +0200 | [diff] [blame] | 4233 | type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); |
| 4234 | ops = kvm_device_ops_table[type]; |
Will Deacon | d60eacb | 2014-09-02 10:27:33 +0100 | [diff] [blame] | 4235 | if (ops == NULL) |
| 4236 | return -ENODEV; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4237 | |
| 4238 | if (test) |
| 4239 | return 0; |
| 4240 | |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 4241 | dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4242 | if (!dev) |
| 4243 | return -ENOMEM; |
| 4244 | |
| 4245 | dev->ops = ops; |
| 4246 | dev->kvm = kvm; |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4247 | |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 4248 | mutex_lock(&kvm->lock); |
Paolo Bonzini | 1d487e9 | 2019-04-11 11:16:47 +0200 | [diff] [blame] | 4249 | ret = ops->create(dev, type); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4250 | if (ret < 0) { |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 4251 | mutex_unlock(&kvm->lock); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4252 | kfree(dev); |
| 4253 | return ret; |
| 4254 | } |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 4255 | list_add(&dev->vm_node, &kvm->devices); |
| 4256 | mutex_unlock(&kvm->lock); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4257 | |
Christoffer Dall | 023e9fd | 2016-08-09 19:13:00 +0200 | [diff] [blame] | 4258 | if (ops->init) |
| 4259 | ops->init(dev); |
| 4260 | |
Jann Horn | cfa3938 | 2019-01-26 01:54:33 +0100 | [diff] [blame] | 4261 | kvm_get_kvm(kvm); |
Yann Droneaud | 24009b0 | 2013-08-24 22:14:07 +0200 | [diff] [blame] | 4262 | ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4263 | if (ret < 0) { |
Sean Christopherson | 149487b | 2019-10-21 15:58:42 -0700 | [diff] [blame] | 4264 | kvm_put_kvm_no_destroy(kvm); |
Christoffer Dall | a28ebea | 2016-08-09 19:13:01 +0200 | [diff] [blame] | 4265 | mutex_lock(&kvm->lock); |
| 4266 | list_del(&dev->vm_node); |
| 4267 | mutex_unlock(&kvm->lock); |
Dan Carpenter | a0f1d21 | 2016-11-30 22:21:05 +0300 | [diff] [blame] | 4268 | ops->destroy(dev); |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4269 | return ret; |
| 4270 | } |
| 4271 | |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4272 | cd->fd = ret; |
| 4273 | return 0; |
| 4274 | } |
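/*
 * The userspace side of kvm_ioctl_create_device(), as a sketch: probe with
 * KVM_CREATE_DEVICE_TEST (the 'test' path above returns before any
 * allocation), then create for real. The type is whichever KVM_DEV_TYPE_*
 * the architecture provides.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int create_device(int vm_fd, unsigned int type)
{
	struct kvm_create_device cd = {
		.type  = type,
		.flags = KVM_CREATE_DEVICE_TEST,
	};

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
		return -1;		/* type not supported */
	cd.flags = 0;
	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
		return -1;
	return cd.fd;			/* filled in by the kernel */
}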
| 4275 | |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4276 | static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) |
| 4277 | { |
| 4278 | switch (arg) { |
| 4279 | case KVM_CAP_USER_MEMORY: |
| 4280 | case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: |
| 4281 | case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4282 | case KVM_CAP_INTERNAL_ERROR_DATA: |
| 4283 | #ifdef CONFIG_HAVE_KVM_MSI |
| 4284 | case KVM_CAP_SIGNAL_MSI: |
| 4285 | #endif |
Paul Mackerras | 297e210 | 2014-06-30 20:51:13 +1000 | [diff] [blame] | 4286 | #ifdef CONFIG_HAVE_KVM_IRQFD |
Paolo Bonzini | dc9be0f | 2015-03-05 11:54:46 +0100 | [diff] [blame] | 4287 | case KVM_CAP_IRQFD: |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4288 | case KVM_CAP_IRQFD_RESAMPLE: |
| 4289 | #endif |
Jason Wang | e9ea506 | 2015-09-15 14:41:59 +0800 | [diff] [blame] | 4290 | case KVM_CAP_IOEVENTFD_ANY_LENGTH: |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4291 | case KVM_CAP_CHECK_EXTENSION_VM: |
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 4292 | case KVM_CAP_ENABLE_CAP_VM: |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 4293 | case KVM_CAP_HALT_POLL: |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4294 | return 1; |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 4295 | #ifdef CONFIG_KVM_MMIO |
Paolo Bonzini | 3042255 | 2017-03-31 13:53:22 +0200 | [diff] [blame] | 4296 | case KVM_CAP_COALESCED_MMIO: |
| 4297 | return KVM_COALESCED_MMIO_PAGE_OFFSET; |
Peng Hao | 0804c84 | 2018-10-14 07:09:55 +0800 | [diff] [blame] | 4298 | case KVM_CAP_COALESCED_PIO: |
| 4299 | return 1; |
Paolo Bonzini | 3042255 | 2017-03-31 13:53:22 +0200 | [diff] [blame] | 4300 | #endif |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 4301 | #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
| 4302 | case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: |
| 4303 | return KVM_DIRTY_LOG_MANUAL_CAPS; |
| 4304 | #endif |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4305 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
| 4306 | case KVM_CAP_IRQ_ROUTING: |
| 4307 | return KVM_MAX_IRQ_ROUTES; |
| 4308 | #endif |
Paolo Bonzini | f481b06 | 2015-05-17 17:30:37 +0200 | [diff] [blame] | 4309 | #if KVM_ADDRESS_SPACE_NUM > 1 |
| 4310 | case KVM_CAP_MULTI_ADDRESS_SPACE: |
| 4311 | return KVM_ADDRESS_SPACE_NUM; |
| 4312 | #endif |
Paolo Bonzini | c110ae5 | 2019-03-28 17:24:03 +0100 | [diff] [blame] | 4313 | case KVM_CAP_NR_MEMSLOTS: |
| 4314 | return KVM_USER_MEM_SLOTS; |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 4315 | case KVM_CAP_DIRTY_LOG_RING: |
David Woodhouse | dc70ec2 | 2021-11-21 12:54:40 +0000 | [diff] [blame] | 4316 | #ifdef CONFIG_HAVE_KVM_DIRTY_RING |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 4317 | return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); |
| 4318 | #else |
| 4319 | return 0; |
| 4320 | #endif |
Jing Zhang | ce55c04 | 2021-06-18 22:27:06 +0000 | [diff] [blame] | 4321 | case KVM_CAP_BINARY_STATS_FD: |
| 4322 | return 1; |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4323 | default: |
| 4324 | break; |
| 4325 | } |
| 4326 | return kvm_vm_ioctl_check_extension(kvm, arg); |
| 4327 | } |
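/*
 * Querying the function above from userspace, as a sketch:
 * KVM_CHECK_EXTENSION on a VM fd reaches this generic handler (the
 * KVM_CAP_CHECK_EXTENSION_VM entry advertises that this works); the same
 * ioctl on the /dev/kvm fd answers system-wide rather than per VM.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int vm_has_cap(int vm_fd, long cap)
{
	return ioctl(vm_fd, KVM_CHECK_EXTENSION, cap) > 0;
}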
| 4328 | |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 4329 | static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) |
| 4330 | { |
| 4331 | int r; |
| 4332 | |
| 4333 | if (!KVM_DIRTY_LOG_PAGE_OFFSET) |
| 4334 | return -EINVAL; |
| 4335 | |
| 4336 | 	/* the size should be a power of 2 */
| 4337 | if (!size || (size & (size - 1))) |
| 4338 | return -EINVAL; |
| 4339 | |
| 4340 | 	/* Must be large enough to hold the reserved entries, and at least a page */
| 4341 | if (size < kvm_dirty_ring_get_rsvd_entries() * |
| 4342 | sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE) |
| 4343 | return -EINVAL; |
| 4344 | |
| 4345 | if (size > KVM_DIRTY_RING_MAX_ENTRIES * |
| 4346 | sizeof(struct kvm_dirty_gfn)) |
| 4347 | return -E2BIG; |
| 4348 | |
| 4349 | 	/* We only allow the size to be set once */
| 4350 | if (kvm->dirty_ring_size) |
| 4351 | return -EINVAL; |
| 4352 | |
| 4353 | mutex_lock(&kvm->lock); |
| 4354 | |
| 4355 | if (kvm->created_vcpus) { |
| 4356 | 		/* We don't allow changing this value after vCPUs have been created */
| 4357 | r = -EINVAL; |
| 4358 | } else { |
| 4359 | kvm->dirty_ring_size = size; |
| 4360 | r = 0; |
| 4361 | } |
| 4362 | |
| 4363 | mutex_unlock(&kvm->lock); |
| 4364 | return r; |
| 4365 | } |
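/*
 * A hedged userspace-side sketch of enabling the ring before any vCPU
 * exists, with a size that satisfies every check above: 65536 bytes is a
 * power of two, at least a page, and (at 16 bytes per struct
 * kvm_dirty_gfn) holds 4096 entries, comfortably above the reserved-entry
 * minimum and below the maximum. The value is purely illustrative.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int enable_dirty_ring(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap  = KVM_CAP_DIRTY_LOG_RING,
		.args = { 65536 },	/* bytes, not entries */
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}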
| 4366 | |
| 4367 | static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) |
| 4368 | { |
Marc Zyngier | 46808a4 | 2021-11-16 16:04:02 +0000 | [diff] [blame] | 4369 | unsigned long i; |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 4370 | struct kvm_vcpu *vcpu; |
| 4371 | int cleared = 0; |
| 4372 | |
| 4373 | if (!kvm->dirty_ring_size) |
| 4374 | return -EINVAL; |
| 4375 | |
| 4376 | mutex_lock(&kvm->slots_lock); |
| 4377 | |
| 4378 | kvm_for_each_vcpu(i, vcpu, kvm) |
| 4379 | cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring); |
| 4380 | |
| 4381 | mutex_unlock(&kvm->slots_lock); |
| 4382 | |
| 4383 | if (cleared) |
| 4384 | kvm_flush_remote_tlbs(kvm); |
| 4385 | |
| 4386 | return cleared; |
| 4387 | } |
| 4388 | |
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 4389 | int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, |
| 4390 | struct kvm_enable_cap *cap) |
| 4391 | { |
| 4392 | return -EINVAL; |
| 4393 | } |
| 4394 | |
| 4395 | static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, |
| 4396 | struct kvm_enable_cap *cap) |
| 4397 | { |
| 4398 | switch (cap->cap) { |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 4399 | #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 4400 | case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: { |
| 4401 | u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE; |
| 4402 | |
| 4403 | if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE) |
| 4404 | allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS; |
| 4405 | |
| 4406 | if (cap->flags || (cap->args[0] & ~allowed_options)) |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 4407 | return -EINVAL; |
| 4408 | kvm->manual_dirty_log_protect = cap->args[0]; |
| 4409 | return 0; |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 4410 | } |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 4411 | #endif |
David Matlack | acd0578 | 2020-04-17 15:14:46 -0700 | [diff] [blame] | 4412 | case KVM_CAP_HALT_POLL: { |
| 4413 | if (cap->flags || cap->args[0] != (unsigned int)cap->args[0]) |
| 4414 | return -EINVAL; |
| 4415 | |
| 4416 | kvm->max_halt_poll_ns = cap->args[0]; |
| 4417 | return 0; |
| 4418 | } |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 4419 | case KVM_CAP_DIRTY_LOG_RING: |
| 4420 | return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); |
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 4421 | default: |
| 4422 | return kvm_vm_ioctl_enable_cap(kvm, cap); |
| 4423 | } |
| 4424 | } |
| 4425 | |
Jing Zhang | fcfe1ba | 2021-06-18 22:27:05 +0000 | [diff] [blame] | 4426 | static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer, |
| 4427 | size_t size, loff_t *offset) |
| 4428 | { |
| 4429 | struct kvm *kvm = file->private_data; |
| 4430 | |
| 4431 | return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header, |
| 4432 | &kvm_vm_stats_desc[0], &kvm->stat, |
| 4433 | sizeof(kvm->stat), user_buffer, size, offset); |
| 4434 | } |
| 4435 | |
| 4436 | static const struct file_operations kvm_vm_stats_fops = { |
| 4437 | .read = kvm_vm_stats_read, |
| 4438 | .llseek = noop_llseek, |
| 4439 | }; |
| 4440 | |
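| | /*
| | * Create a read-only anonymous-inode fd exposing this VM's binary stats
| | * through kvm_vm_stats_fops. An illustrative userspace sketch (not part
| | * of this file):
| | *
| | *   int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);
| | *   pread(stats_fd, buf, buf_len, 0);
| | *
| | * FMODE_PREAD is set below so that pread() works on the new fd.
| | */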
| 4441 | static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) |
| 4442 | { |
| 4443 | int fd; |
| 4444 | struct file *file; |
| 4445 | |
| 4446 | fd = get_unused_fd_flags(O_CLOEXEC); |
| 4447 | if (fd < 0) |
| 4448 | return fd; |
| 4449 | |
| 4450 | file = anon_inode_getfile("kvm-vm-stats", |
| 4451 | &kvm_vm_stats_fops, kvm, O_RDONLY); |
| 4452 | if (IS_ERR(file)) { |
| 4453 | put_unused_fd(fd); |
| 4454 | return PTR_ERR(file); |
| 4455 | } |
| 4456 | file->f_mode |= FMODE_PREAD; |
| 4457 | fd_install(fd, file); |
| 4458 | |
| 4459 | return fd; |
| 4460 | } |
| 4461 | |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 4462 | static long kvm_vm_ioctl(struct file *filp, |
| 4463 | unsigned int ioctl, unsigned long arg) |
| 4464 | { |
| 4465 | struct kvm *kvm = filp->private_data; |
| 4466 | void __user *argp = (void __user *)arg; |
Carsten Otte | 1fe779f | 2007-10-29 16:08:35 +0100 | [diff] [blame] | 4467 | int r; |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 4468 | |
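| | /*
| | * VM ioctls must be issued from the mm that owns the VM and are
| | * rejected once the VM has been marked dead.
| | */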
Paolo Bonzini | f4d3165 | 2021-11-11 10:13:38 -0500 | [diff] [blame] | 4469 | if (kvm->mm != current->mm || kvm->vm_dead) |
Avi Kivity | 6d4e4c4 | 2007-11-21 16:41:05 +0200 | [diff] [blame] | 4470 | return -EIO; |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 4471 | switch (ioctl) { |
| 4472 | case KVM_CREATE_VCPU: |
| 4473 | r = kvm_vm_ioctl_create_vcpu(kvm, arg); |
Avi Kivity | bccf215 | 2007-02-21 18:04:26 +0200 | [diff] [blame] | 4474 | break; |
Paolo Bonzini | e5d83c7 | 2017-02-16 10:40:56 +0100 | [diff] [blame] | 4475 | case KVM_ENABLE_CAP: { |
| 4476 | struct kvm_enable_cap cap; |
| 4477 | |
| 4478 | r = -EFAULT; |
| 4479 | if (copy_from_user(&cap, argp, sizeof(cap))) |
| 4480 | goto out; |
| 4481 | r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); |
| 4482 | break; |
| 4483 | } |
Izik Eidus | 6fc138d | 2007-10-09 19:20:39 +0200 | [diff] [blame] | 4484 | case KVM_SET_USER_MEMORY_REGION: { |
| 4485 | struct kvm_userspace_memory_region kvm_userspace_mem; |
| 4486 | |
| 4487 | r = -EFAULT; |
| 4488 | if (copy_from_user(&kvm_userspace_mem, argp, |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4489 | sizeof(kvm_userspace_mem))) |
Izik Eidus | 6fc138d | 2007-10-09 19:20:39 +0200 | [diff] [blame] | 4490 | goto out; |
| 4491 | |
Takuya Yoshikawa | 47ae31e | 2013-02-27 19:43:00 +0900 | [diff] [blame] | 4492 | r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4493 | break; |
| 4494 | } |
| 4495 | case KVM_GET_DIRTY_LOG: { |
| 4496 | struct kvm_dirty_log log; |
| 4497 | |
| 4498 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4499 | if (copy_from_user(&log, argp, sizeof(log))) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4500 | goto out; |
Avi Kivity | 2c6f5df | 2007-02-20 18:27:58 +0200 | [diff] [blame] | 4501 | r = kvm_vm_ioctl_get_dirty_log(kvm, &log); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4502 | break; |
| 4503 | } |
Paolo Bonzini | 2a31b9d | 2018-10-23 02:36:47 +0200 | [diff] [blame] | 4504 | #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
| 4505 | case KVM_CLEAR_DIRTY_LOG: { |
| 4506 | struct kvm_clear_dirty_log log; |
| 4507 | |
| 4508 | r = -EFAULT; |
| 4509 | if (copy_from_user(&log, argp, sizeof(log))) |
| 4510 | goto out; |
| 4511 | r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); |
| 4512 | break; |
| 4513 | } |
| 4514 | #endif |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 4515 | #ifdef CONFIG_KVM_MMIO |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4516 | case KVM_REGISTER_COALESCED_MMIO: { |
| 4517 | struct kvm_coalesced_mmio_zone zone; |
Xiubo Li | f95ef0cd | 2015-02-26 14:58:23 +0800 | [diff] [blame] | 4518 | |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4519 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4520 | if (copy_from_user(&zone, argp, sizeof(zone))) |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4521 | goto out; |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4522 | r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4523 | break; |
| 4524 | } |
| 4525 | case KVM_UNREGISTER_COALESCED_MMIO: { |
| 4526 | struct kvm_coalesced_mmio_zone zone; |
Xiubo Li | f95ef0cd | 2015-02-26 14:58:23 +0800 | [diff] [blame] | 4527 | |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4528 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4529 | if (copy_from_user(&zone, argp, sizeof(zone))) |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4530 | goto out; |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4531 | r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4532 | break; |
| 4533 | } |
| 4534 | #endif |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 4535 | case KVM_IRQFD: { |
| 4536 | struct kvm_irqfd data; |
| 4537 | |
| 4538 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4539 | if (copy_from_user(&data, argp, sizeof(data))) |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 4540 | goto out; |
Alex Williamson | d4db293 | 2012-06-29 09:56:08 -0600 | [diff] [blame] | 4541 | r = kvm_irqfd(kvm, &data); |
Gregory Haskins | 721eecbf | 2009-05-20 10:30:49 -0400 | [diff] [blame] | 4542 | break; |
| 4543 | } |
Gregory Haskins | d34e6b1 | 2009-07-07 17:08:49 -0400 | [diff] [blame] | 4544 | case KVM_IOEVENTFD: { |
| 4545 | struct kvm_ioeventfd data; |
| 4546 | |
| 4547 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4548 | if (copy_from_user(&data, argp, sizeof(data))) |
Gregory Haskins | d34e6b1 | 2009-07-07 17:08:49 -0400 | [diff] [blame] | 4549 | goto out; |
| 4550 | r = kvm_ioeventfd(kvm, &data); |
| 4551 | break; |
| 4552 | } |
Jan Kiszka | 07975ad | 2012-03-29 21:14:12 +0200 | [diff] [blame] | 4553 | #ifdef CONFIG_HAVE_KVM_MSI |
| 4554 | case KVM_SIGNAL_MSI: { |
| 4555 | struct kvm_msi msi; |
| 4556 | |
| 4557 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4558 | if (copy_from_user(&msi, argp, sizeof(msi))) |
Jan Kiszka | 07975ad | 2012-03-29 21:14:12 +0200 | [diff] [blame] | 4559 | goto out; |
| 4560 | r = kvm_send_userspace_msi(kvm, &msi); |
| 4561 | break; |
| 4562 | } |
| 4563 | #endif |
Christoffer Dall | 23d43cf | 2012-07-24 08:51:20 -0400 | [diff] [blame] | 4564 | #ifdef __KVM_HAVE_IRQ_LINE |
| 4565 | case KVM_IRQ_LINE_STATUS: |
| 4566 | case KVM_IRQ_LINE: { |
| 4567 | struct kvm_irq_level irq_event; |
| 4568 | |
| 4569 | r = -EFAULT; |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4570 | if (copy_from_user(&irq_event, argp, sizeof(irq_event))) |
Christoffer Dall | 23d43cf | 2012-07-24 08:51:20 -0400 | [diff] [blame] | 4571 | goto out; |
| 4572 | |
Yang Zhang | aa2fbe6 | 2013-04-11 19:21:40 +0800 | [diff] [blame] | 4573 | r = kvm_vm_ioctl_irq_line(kvm, &irq_event, |
| 4574 | ioctl == KVM_IRQ_LINE_STATUS); |
Christoffer Dall | 23d43cf | 2012-07-24 08:51:20 -0400 | [diff] [blame] | 4575 | if (r) |
| 4576 | goto out; |
| 4577 | |
| 4578 | r = -EFAULT; |
| 4579 | if (ioctl == KVM_IRQ_LINE_STATUS) { |
Xiubo Li | 893bdbf | 2015-02-26 14:58:19 +0800 | [diff] [blame] | 4580 | if (copy_to_user(argp, &irq_event, sizeof(irq_event))) |
Christoffer Dall | 23d43cf | 2012-07-24 08:51:20 -0400 | [diff] [blame] | 4581 | goto out; |
| 4582 | } |
| 4583 | |
| 4584 | r = 0; |
| 4585 | break; |
| 4586 | } |
| 4587 | #endif |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 4588 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
| 4589 | case KVM_SET_GSI_ROUTING: { |
| 4590 | struct kvm_irq_routing routing; |
| 4591 | struct kvm_irq_routing __user *urouting; |
Paolo Bonzini | f8c1b85 | 2016-06-01 14:09:22 +0200 | [diff] [blame] | 4592 | struct kvm_irq_routing_entry *entries = NULL; |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 4593 | |
| 4594 | r = -EFAULT; |
| 4595 | if (copy_from_user(&routing, argp, sizeof(routing))) |
| 4596 | goto out; |
| 4597 | r = -EINVAL; |
David Hildenbrand | 5c0aea0 | 2017-04-28 17:06:20 +0200 | [diff] [blame] | 4598 | if (!kvm_arch_can_set_irq_routing(kvm)) |
| 4599 | goto out; |
Xiubo Li | caf1ff2 | 2016-06-15 18:00:33 +0800 | [diff] [blame] | 4600 | if (routing.nr > KVM_MAX_IRQ_ROUTES) |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 4601 | goto out; |
| 4602 | if (routing.flags) |
| 4603 | goto out; |
Paolo Bonzini | f8c1b85 | 2016-06-01 14:09:22 +0200 | [diff] [blame] | 4604 | if (routing.nr) { |
Paolo Bonzini | f8c1b85 | 2016-06-01 14:09:22 +0200 | [diff] [blame] | 4605 | urouting = argp; |
Denis Efremov | 7ec28e2 | 2020-06-03 13:11:31 +0300 | [diff] [blame] | 4606 | entries = vmemdup_user(urouting->entries, |
| 4607 | array_size(sizeof(*entries), |
| 4608 | routing.nr)); |
| 4609 | if (IS_ERR(entries)) { |
| 4610 | r = PTR_ERR(entries); |
| 4611 | goto out; |
| 4612 | } |
Paolo Bonzini | f8c1b85 | 2016-06-01 14:09:22 +0200 | [diff] [blame] | 4613 | } |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 4614 | r = kvm_set_irq_routing(kvm, entries, routing.nr, |
| 4615 | routing.flags); |
Denis Efremov | 7ec28e2 | 2020-06-03 13:11:31 +0300 | [diff] [blame] | 4616 | kvfree(entries); |
Alexander Graf | aa8d594 | 2013-04-15 21:12:53 +0200 | [diff] [blame] | 4617 | break; |
| 4618 | } |
| 4619 | #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ |
Scott Wood | 852b6d5 | 2013-04-12 14:08:42 +0000 | [diff] [blame] | 4620 | case KVM_CREATE_DEVICE: { |
| 4621 | struct kvm_create_device cd; |
| 4622 | |
| 4623 | r = -EFAULT; |
| 4624 | if (copy_from_user(&cd, argp, sizeof(cd))) |
| 4625 | goto out; |
| 4626 | |
| 4627 | r = kvm_ioctl_create_device(kvm, &cd); |
| 4628 | if (r) |
| 4629 | goto out; |
| 4630 | |
| 4631 | r = -EFAULT; |
| 4632 | if (copy_to_user(argp, &cd, sizeof(cd))) |
| 4633 | goto out; |
| 4634 | |
| 4635 | r = 0; |
| 4636 | break; |
| 4637 | } |
Alexander Graf | 92b591a | 2014-07-14 18:33:08 +0200 | [diff] [blame] | 4638 | case KVM_CHECK_EXTENSION: |
| 4639 | r = kvm_vm_ioctl_check_extension_generic(kvm, arg); |
| 4640 | break; |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 4641 | case KVM_RESET_DIRTY_RINGS: |
| 4642 | r = kvm_vm_ioctl_reset_dirty_pages(kvm); |
| 4643 | break; |
Jing Zhang | fcfe1ba | 2021-06-18 22:27:05 +0000 | [diff] [blame] | 4644 | case KVM_GET_STATS_FD: |
| 4645 | r = kvm_vm_ioctl_get_stats_fd(kvm); |
| 4646 | break; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4647 | default: |
Carsten Otte | 1fe779f | 2007-10-29 16:08:35 +0100 | [diff] [blame] | 4648 | r = kvm_arch_vm_ioctl(filp, ioctl, arg); |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4649 | } |
| 4650 | out: |
| 4651 | return r; |
| 4652 | } |
| 4653 | |
Christian Borntraeger | de8e5d7 | 2015-02-03 09:35:15 +0100 | [diff] [blame] | 4654 | #ifdef CONFIG_KVM_COMPAT |
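| | /*
| | * Compat (32-bit userspace) variants of the dirty-log structures: the
| | * bitmap pointer is a 32-bit compat_uptr_t rather than a 64-bit pointer,
| | * so the layouts differ and the fields are converted by hand below.
| | */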
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4655 | struct compat_kvm_dirty_log { |
| 4656 | __u32 slot; |
| 4657 | __u32 padding1; |
| 4658 | union { |
| 4659 | compat_uptr_t dirty_bitmap; /* one bit per page */ |
| 4660 | __u64 padding2; |
| 4661 | }; |
| 4662 | }; |
| 4663 | |
Paolo Bonzini | 8750f9b | 2021-07-27 08:43:10 -0400 | [diff] [blame] | 4664 | struct compat_kvm_clear_dirty_log { |
| 4665 | __u32 slot; |
| 4666 | __u32 num_pages; |
| 4667 | __u64 first_page; |
| 4668 | union { |
| 4669 | compat_uptr_t dirty_bitmap; /* one bit per page */ |
| 4670 | __u64 padding2; |
| 4671 | }; |
| 4672 | }; |
| 4673 | |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4674 | static long kvm_vm_compat_ioctl(struct file *filp, |
| 4675 | unsigned int ioctl, unsigned long arg) |
| 4676 | { |
| 4677 | struct kvm *kvm = filp->private_data; |
| 4678 | int r; |
| 4679 | |
Paolo Bonzini | f4d3165 | 2021-11-11 10:13:38 -0500 | [diff] [blame] | 4680 | if (kvm->mm != current->mm || kvm->vm_dead) |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4681 | return -EIO; |
| 4682 | switch (ioctl) { |
Paolo Bonzini | 8750f9b | 2021-07-27 08:43:10 -0400 | [diff] [blame] | 4683 | #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
| 4684 | case KVM_CLEAR_DIRTY_LOG: { |
| 4685 | struct compat_kvm_clear_dirty_log compat_log; |
| 4686 | struct kvm_clear_dirty_log log; |
| 4687 | |
| 4688 | if (copy_from_user(&compat_log, (void __user *)arg, |
| 4689 | sizeof(compat_log))) |
| 4690 | return -EFAULT; |
| 4691 | log.slot = compat_log.slot; |
| 4692 | log.num_pages = compat_log.num_pages; |
| 4693 | log.first_page = compat_log.first_page; |
| 4694 | log.padding2 = compat_log.padding2; |
| 4695 | log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); |
| 4696 | |
| 4697 | r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); |
| 4698 | break; |
| 4699 | } |
| 4700 | #endif |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4701 | case KVM_GET_DIRTY_LOG: { |
| 4702 | struct compat_kvm_dirty_log compat_log; |
| 4703 | struct kvm_dirty_log log; |
| 4704 | |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4705 | if (copy_from_user(&compat_log, (void __user *)arg, |
| 4706 | sizeof(compat_log))) |
Markus Elfring | f6a3b16 | 2017-01-22 11:30:21 +0100 | [diff] [blame] | 4707 | return -EFAULT; |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4708 | log.slot = compat_log.slot; |
| 4709 | log.padding1 = compat_log.padding1; |
| 4710 | log.padding2 = compat_log.padding2; |
| 4711 | log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); |
| 4712 | |
| 4713 | r = kvm_vm_ioctl_get_dirty_log(kvm, &log); |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4714 | break; |
| 4715 | } |
| 4716 | default: |
| 4717 | r = kvm_vm_ioctl(filp, ioctl, arg); |
| 4718 | } |
Arnd Bergmann | 6ff5894 | 2009-10-22 14:19:27 +0200 | [diff] [blame] | 4719 | return r; |
| 4720 | } |
| 4721 | #endif |
| 4722 | |
Christian Borntraeger | 3d3aab1 | 2008-12-02 11:17:32 +0100 | [diff] [blame] | 4723 | static struct file_operations kvm_vm_fops = { |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4724 | .release = kvm_vm_release, |
| 4725 | .unlocked_ioctl = kvm_vm_ioctl, |
Arnd Bergmann | 6038f37 | 2010-08-15 18:52:59 +0200 | [diff] [blame] | 4726 | .llseek = noop_llseek, |
Marc Zyngier | 7ddfd3e | 2018-06-17 10:16:21 +0100 | [diff] [blame] | 4727 | KVM_COMPAT(kvm_vm_compat_ioctl), |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4728 | }; |
| 4729 | |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 4730 | bool file_is_kvm(struct file *file) |
| 4731 | { |
| 4732 | return file && file->f_op == &kvm_vm_fops; |
| 4733 | } |
| 4734 | EXPORT_SYMBOL_GPL(file_is_kvm); |
| 4735 | |
Carsten Otte | e08b963 | 2012-01-04 10:25:20 +0100 | [diff] [blame] | 4736 | static int kvm_dev_ioctl_create_vm(unsigned long type) |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4737 | { |
Heiko Carstens | aac8763 | 2010-10-27 17:22:10 +0200 | [diff] [blame] | 4738 | int r; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4739 | struct kvm *kvm; |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4740 | struct file *file; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4741 | |
Carsten Otte | e08b963 | 2012-01-04 10:25:20 +0100 | [diff] [blame] | 4742 | kvm = kvm_create_vm(type); |
Avi Kivity | d6d2816 | 2007-06-28 08:38:16 -0400 | [diff] [blame] | 4743 | if (IS_ERR(kvm)) |
| 4744 | return PTR_ERR(kvm); |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 4745 | #ifdef CONFIG_KVM_MMIO |
Takuya Yoshikawa | 6ce5a09 | 2010-03-15 22:13:30 +0900 | [diff] [blame] | 4746 | r = kvm_coalesced_mmio_init(kvm); |
Markus Elfring | 7858833 | 2017-11-21 13:40:17 +0100 | [diff] [blame] | 4747 | if (r < 0) |
| 4748 | goto put_kvm; |
Takuya Yoshikawa | 6ce5a09 | 2010-03-15 22:13:30 +0900 | [diff] [blame] | 4749 | #endif |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4750 | r = get_unused_fd_flags(O_CLOEXEC); |
Markus Elfring | 7858833 | 2017-11-21 13:40:17 +0100 | [diff] [blame] | 4751 | if (r < 0) |
| 4752 | goto put_kvm; |
| 4753 | |
Jing Zhang | fcfe1ba | 2021-06-18 22:27:05 +0000 | [diff] [blame] | 4754 | snprintf(kvm->stats_id, sizeof(kvm->stats_id), |
| 4755 | "kvm-%d", task_pid_nr(current)); |
| 4756 | |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4757 | file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); |
| 4758 | if (IS_ERR(file)) { |
| 4759 | put_unused_fd(r); |
Markus Elfring | 7858833 | 2017-11-21 13:40:17 +0100 | [diff] [blame] | 4760 | r = PTR_ERR(file); |
| 4761 | goto put_kvm; |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4762 | } |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4763 | |
Paolo Bonzini | 525df86 | 2017-06-27 15:45:09 +0200 | [diff] [blame] | 4764 | /* |
| 4765 | * Don't call kvm_put_kvm anymore at this point; file->f_op is |
| 4766 | * already set, with ->release() being kvm_vm_release(). In error |
| 4767 | * cases it will be called by the final fput(file) and will take |
| 4768 | * care of doing kvm_put_kvm(kvm). |
| 4769 | */ |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4770 | if (kvm_create_vm_debugfs(kvm, r) < 0) { |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4771 | put_unused_fd(r); |
| 4772 | fput(file); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 4773 | return -ENOMEM; |
| 4774 | } |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 4775 | kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4776 | |
Al Viro | 506cfba | 2016-07-14 18:54:17 +0200 | [diff] [blame] | 4777 | fd_install(r, file); |
Heiko Carstens | aac8763 | 2010-10-27 17:22:10 +0200 | [diff] [blame] | 4778 | return r; |
Markus Elfring | 7858833 | 2017-11-21 13:40:17 +0100 | [diff] [blame] | 4779 | |
| 4780 | put_kvm: |
| 4781 | kvm_put_kvm(kvm); |
| 4782 | return r; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4783 | } |
| 4784 | |
| 4785 | static long kvm_dev_ioctl(struct file *filp, |
| 4786 | unsigned int ioctl, unsigned long arg) |
| 4787 | { |
Avi Kivity | 07c45a3 | 2007-03-07 13:05:38 +0200 | [diff] [blame] | 4788 | long r = -EINVAL; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4789 | |
| 4790 | switch (ioctl) { |
| 4791 | case KVM_GET_API_VERSION: |
Avi Kivity | f0fe510 | 2007-03-07 13:11:17 +0200 | [diff] [blame] | 4792 | if (arg) |
| 4793 | goto out; |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4794 | r = KVM_API_VERSION; |
| 4795 | break; |
| 4796 | case KVM_CREATE_VM: |
Carsten Otte | e08b963 | 2012-01-04 10:25:20 +0100 | [diff] [blame] | 4797 | r = kvm_dev_ioctl_create_vm(arg); |
Avi Kivity | f17abe9 | 2007-02-21 19:28:04 +0200 | [diff] [blame] | 4798 | break; |
Zhang Xiantao | 018d00d | 2007-11-15 23:07:47 +0800 | [diff] [blame] | 4799 | case KVM_CHECK_EXTENSION: |
Alexander Graf | 784aa3d | 2014-07-14 18:27:35 +0200 | [diff] [blame] | 4800 | r = kvm_vm_ioctl_check_extension_generic(NULL, arg); |
Avi Kivity | 5d308f4 | 2007-03-01 17:56:20 +0200 | [diff] [blame] | 4801 | break; |
Avi Kivity | 07c45a3 | 2007-03-07 13:05:38 +0200 | [diff] [blame] | 4802 | case KVM_GET_VCPU_MMAP_SIZE: |
Avi Kivity | 07c45a3 | 2007-03-07 13:05:38 +0200 | [diff] [blame] | 4803 | if (arg) |
| 4804 | goto out; |
Avi Kivity | adb1ff4 | 2008-01-24 15:13:08 +0200 | [diff] [blame] | 4805 | r = PAGE_SIZE; /* struct kvm_run */ |
| 4806 | #ifdef CONFIG_X86 |
| 4807 | r += PAGE_SIZE; /* pio data page */ |
| 4808 | #endif |
Paolo Bonzini | 4b4357e | 2017-03-31 13:53:23 +0200 | [diff] [blame] | 4809 | #ifdef CONFIG_KVM_MMIO |
Laurent Vivier | 5f94c17 | 2008-05-30 16:05:54 +0200 | [diff] [blame] | 4810 | r += PAGE_SIZE; /* coalesced mmio ring page */ |
| 4811 | #endif |
Avi Kivity | 07c45a3 | 2007-03-07 13:05:38 +0200 | [diff] [blame] | 4812 | break; |
Feng(Eric) Liu | d4c9ff2 | 2008-04-10 08:47:53 -0400 | [diff] [blame] | 4813 | case KVM_TRACE_ENABLE: |
| 4814 | case KVM_TRACE_PAUSE: |
| 4815 | case KVM_TRACE_DISABLE: |
Marcelo Tosatti | 2023a29 | 2009-06-18 11:47:28 -0300 | [diff] [blame] | 4816 | r = -EOPNOTSUPP; |
Feng(Eric) Liu | d4c9ff2 | 2008-04-10 08:47:53 -0400 | [diff] [blame] | 4817 | break; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4818 | default: |
Carsten Otte | 043405e | 2007-10-10 17:16:19 +0200 | [diff] [blame] | 4819 | return kvm_arch_dev_ioctl(filp, ioctl, arg); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4820 | } |
| 4821 | out: |
| 4822 | return r; |
| 4823 | } |
| 4824 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4825 | static struct file_operations kvm_chardev_ops = { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4826 | .unlocked_ioctl = kvm_dev_ioctl, |
Arnd Bergmann | 6038f37 | 2010-08-15 18:52:59 +0200 | [diff] [blame] | 4827 | .llseek = noop_llseek, |
Marc Zyngier | 7ddfd3e | 2018-06-17 10:16:21 +0100 | [diff] [blame] | 4828 | KVM_COMPAT(kvm_dev_ioctl), |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4829 | }; |
| 4830 | |
| 4831 | static struct miscdevice kvm_dev = { |
Avi Kivity | bbe4432 | 2007-03-04 13:27:36 +0200 | [diff] [blame] | 4832 | KVM_MINOR, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4833 | "kvm", |
| 4834 | &kvm_chardev_ops, |
| 4835 | }; |
| 4836 | |
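| | /*
| | * Enable the virtualization extensions on the current CPU. The _nolock
| | * variants do not take kvm_count_lock themselves; callers serialize
| | * enable/disable transitions via that lock.
| | */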
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4837 | static void hardware_enable_nolock(void *junk) |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4838 | { |
| 4839 | int cpu = raw_smp_processor_id(); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4840 | int r; |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4841 | |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 4842 | if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4843 | return; |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4844 | |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 4845 | cpumask_set_cpu(cpu, cpus_hardware_enabled); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4846 | |
Radim Krčmář | 13a34e0 | 2014-08-28 15:13:03 +0200 | [diff] [blame] | 4847 | r = kvm_arch_hardware_enable(); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4848 | |
| 4849 | if (r) { |
| 4850 | cpumask_clear_cpu(cpu, cpus_hardware_enabled); |
| 4851 | atomic_inc(&hardware_enable_failed); |
Xiubo Li | 1170adc | 2015-02-26 14:58:26 +0800 | [diff] [blame] | 4852 | pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4853 | } |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4854 | } |
| 4855 | |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 4856 | static int kvm_starting_cpu(unsigned int cpu) |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4857 | { |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4858 | raw_spin_lock(&kvm_count_lock); |
Paolo Bonzini | 4fa92fb | 2013-09-10 12:57:17 +0200 | [diff] [blame] | 4859 | if (kvm_usage_count) |
| 4860 | hardware_enable_nolock(NULL); |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4861 | raw_spin_unlock(&kvm_count_lock); |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 4862 | return 0; |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4863 | } |
| 4864 | |
| 4865 | static void hardware_disable_nolock(void *junk) |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4866 | { |
| 4867 | int cpu = raw_smp_processor_id(); |
| 4868 | |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 4869 | if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4870 | return; |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 4871 | cpumask_clear_cpu(cpu, cpus_hardware_enabled); |
Radim Krčmář | 13a34e0 | 2014-08-28 15:13:03 +0200 | [diff] [blame] | 4872 | kvm_arch_hardware_disable(); |
Avi Kivity | 1b6c016 | 2007-05-24 13:03:52 +0300 | [diff] [blame] | 4873 | } |
| 4874 | |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 4875 | static int kvm_dying_cpu(unsigned int cpu) |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4876 | { |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4877 | raw_spin_lock(&kvm_count_lock); |
Paolo Bonzini | 4fa92fb | 2013-09-10 12:57:17 +0200 | [diff] [blame] | 4878 | if (kvm_usage_count) |
| 4879 | hardware_disable_nolock(NULL); |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4880 | raw_spin_unlock(&kvm_count_lock); |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 4881 | return 0; |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4882 | } |
| 4883 | |
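| | /*
| | * kvm_usage_count tracks the number of VMs; virtualization is enabled on
| | * all online CPUs when the first VM is created and disabled again when
| | * the last one goes away, with kvm_count_lock serializing the
| | * transitions.
| | */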
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4884 | static void hardware_disable_all_nolock(void) |
| 4885 | { |
| 4886 | BUG_ON(!kvm_usage_count); |
| 4887 | |
| 4888 | kvm_usage_count--; |
| 4889 | if (!kvm_usage_count) |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4890 | on_each_cpu(hardware_disable_nolock, NULL, 1); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4891 | } |
| 4892 | |
| 4893 | static void hardware_disable_all(void) |
| 4894 | { |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4895 | raw_spin_lock(&kvm_count_lock); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4896 | hardware_disable_all_nolock(); |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4897 | raw_spin_unlock(&kvm_count_lock); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4898 | } |
| 4899 | |
| 4900 | static int hardware_enable_all(void) |
| 4901 | { |
| 4902 | int r = 0; |
| 4903 | |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4904 | raw_spin_lock(&kvm_count_lock); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4905 | |
| 4906 | kvm_usage_count++; |
| 4907 | if (kvm_usage_count == 1) { |
| 4908 | atomic_set(&hardware_enable_failed, 0); |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4909 | on_each_cpu(hardware_enable_nolock, NULL, 1); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4910 | |
| 4911 | if (atomic_read(&hardware_enable_failed)) { |
| 4912 | hardware_disable_all_nolock(); |
| 4913 | r = -EBUSY; |
| 4914 | } |
| 4915 | } |
| 4916 | |
Paolo Bonzini | 4a937f9 | 2013-09-10 12:58:35 +0200 | [diff] [blame] | 4917 | raw_spin_unlock(&kvm_count_lock); |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 4918 | |
| 4919 | return r; |
| 4920 | } |
| 4921 | |
Rusty Russell | 9a2b85c | 2007-07-17 23:17:55 +1000 | [diff] [blame] | 4922 | static int kvm_reboot(struct notifier_block *notifier, unsigned long val, |
Mike Day | d77c26f | 2007-10-08 09:02:08 -0400 | [diff] [blame] | 4923 | void *v) |
Rusty Russell | 9a2b85c | 2007-07-17 23:17:55 +1000 | [diff] [blame] | 4924 | { |
Sheng Yang | 8e1c181 | 2009-04-29 11:09:04 +0800 | [diff] [blame] | 4925 | /* |
| 4926 | * Some (well, at least mine) BIOSes hang on reboot if the CPU is
| 4927 | * still in VMX root mode.
| 4928 | *
| 4929 | * And Intel TXT requires VMX to be off on all CPUs when the system shuts down.
| 4930 | */ |
Xiubo Li | 1170adc | 2015-02-26 14:58:26 +0800 | [diff] [blame] | 4931 | pr_info("kvm: exiting hardware virtualization\n"); |
Sheng Yang | 8e1c181 | 2009-04-29 11:09:04 +0800 | [diff] [blame] | 4932 | kvm_rebooting = true; |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 4933 | on_each_cpu(hardware_disable_nolock, NULL, 1); |
Rusty Russell | 9a2b85c | 2007-07-17 23:17:55 +1000 | [diff] [blame] | 4934 | return NOTIFY_OK; |
| 4935 | } |
| 4936 | |
| 4937 | static struct notifier_block kvm_reboot_notifier = { |
| 4938 | .notifier_call = kvm_reboot, |
| 4939 | .priority = 0, |
| 4940 | }; |
| 4941 | |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4942 | static void kvm_io_bus_destroy(struct kvm_io_bus *bus) |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 4943 | { |
| 4944 | int i; |
| 4945 | |
| 4946 | for (i = 0; i < bus->dev_count; i++) { |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4947 | struct kvm_io_device *pos = bus->range[i].dev; |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 4948 | |
| 4949 | kvm_iodevice_destructor(pos); |
| 4950 | } |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 4951 | kfree(bus); |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 4952 | } |
| 4953 | |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 4954 | static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, |
Xiubo Li | 20e87b7 | 2015-02-26 14:58:25 +0800 | [diff] [blame] | 4955 | const struct kvm_io_range *r2) |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4956 | { |
Jason Wang | 8f4216c7 | 2015-09-15 14:41:57 +0800 | [diff] [blame] | 4957 | gpa_t addr1 = r1->addr; |
| 4958 | gpa_t addr2 = r2->addr; |
| 4959 | |
| 4960 | if (addr1 < addr2) |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4961 | return -1; |
Jason Wang | 8f4216c7 | 2015-09-15 14:41:57 +0800 | [diff] [blame] | 4962 | |
| 4963 | /*
| | * If r2->len == 0, match the exact address. If r2->len != 0,
| 4964 | * accept any overlapping write. Any order is acceptable for
| 4965 | * overlapping ranges, because kvm_io_bus_get_first_dev ensures
| 4966 | * we process all of them.
| 4967 | */
| 4968 | if (r2->len) { |
| 4969 | addr1 += r1->len; |
| 4970 | addr2 += r2->len; |
| 4971 | } |
| 4972 | |
| 4973 | if (addr1 > addr2) |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4974 | return 1; |
Jason Wang | 8f4216c7 | 2015-09-15 14:41:57 +0800 | [diff] [blame] | 4975 | |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4976 | return 0; |
| 4977 | } |
| 4978 | |
Paolo Bonzini | a343c9b | 2013-07-16 13:03:29 +0200 | [diff] [blame] | 4979 | static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) |
| 4980 | { |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 4981 | return kvm_io_bus_cmp(p1, p2); |
Paolo Bonzini | a343c9b | 2013-07-16 13:03:29 +0200 | [diff] [blame] | 4982 | } |
| 4983 | |
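| | /*
| | * Return the index of the first range on the bus that matches
| | * [addr, addr+len): bsearch() may land on any matching entry, so walk
| | * backwards while the previous entry still compares equal.
| | */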
Geoff Levand | 39369f7 | 2013-04-05 19:20:30 +0000 | [diff] [blame] | 4984 | static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 4985 | gpa_t addr, int len) |
| 4986 | { |
| 4987 | struct kvm_io_range *range, key; |
| 4988 | int off; |
| 4989 | |
| 4990 | key = (struct kvm_io_range) { |
| 4991 | .addr = addr, |
| 4992 | .len = len, |
| 4993 | }; |
| 4994 | |
| 4995 | range = bsearch(&key, bus->range, bus->dev_count, |
| 4996 | sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); |
| 4997 | if (range == NULL) |
| 4998 | return -ENOENT; |
| 4999 | |
| 5000 | off = range - bus->range; |
| 5001 | |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 5002 | while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 5003 | off--; |
| 5004 | |
| 5005 | return off; |
| 5006 | } |
| 5007 | |
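| | /*
| | * Offer the write to each device overlapping the range, in address
| | * order, until one accepts it (kvm_iodevice_write() returns 0). The
| | * index of the accepting device is returned and doubles as the cookie
| | * used by kvm_io_bus_write_cookie().
| | */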
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 5008 | static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 5009 | struct kvm_io_range *range, const void *val) |
| 5010 | { |
| 5011 | int idx; |
| 5012 | |
| 5013 | idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); |
| 5014 | if (idx < 0) |
| 5015 | return -EOPNOTSUPP; |
| 5016 | |
| 5017 | while (idx < bus->dev_count && |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 5018 | kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 5019 | if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 5020 | range->len, val)) |
| 5021 | return idx; |
| 5022 | idx++; |
| 5023 | } |
| 5024 | |
| 5025 | return -EOPNOTSUPP; |
| 5026 | } |
| 5027 | |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 5028 | /* kvm_io_bus_write - called under kvm->slots_lock */ |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 5029 | int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 5030 | int len, const void *val) |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 5031 | { |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 5032 | struct kvm_io_bus *bus; |
| 5033 | struct kvm_io_range range; |
| 5034 | int r; |
| 5035 | |
| 5036 | range = (struct kvm_io_range) { |
| 5037 | .addr = addr, |
| 5038 | .len = len, |
| 5039 | }; |
| 5040 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 5041 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 5042 | if (!bus) |
| 5043 | return -ENOMEM; |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 5044 | r = __kvm_io_bus_write(vcpu, bus, &range, val); |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 5045 | return r < 0 ? r : 0; |
| 5046 | } |
Leo Yan | a242010 | 2019-02-22 16:10:09 +0800 | [diff] [blame] | 5047 | EXPORT_SYMBOL_GPL(kvm_io_bus_write); |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 5048 | |
| 5049 | /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 5050 | int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, |
| 5051 | gpa_t addr, int len, const void *val, long cookie) |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 5052 | { |
Lai Jiangshan | 90d83dc | 2010-04-19 17:41:23 +0800 | [diff] [blame] | 5053 | struct kvm_io_bus *bus; |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 5054 | struct kvm_io_range range; |
| 5055 | |
| 5056 | range = (struct kvm_io_range) { |
| 5057 | .addr = addr, |
| 5058 | .len = len, |
| 5059 | }; |
Lai Jiangshan | 90d83dc | 2010-04-19 17:41:23 +0800 | [diff] [blame] | 5060 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 5061 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 5062 | if (!bus) |
| 5063 | return -ENOMEM; |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 5064 | |
| 5065 | /* First try the device referenced by cookie. */ |
| 5066 | if ((cookie >= 0) && (cookie < bus->dev_count) && |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 5067 | (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 5068 | if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 5069 | val)) |
| 5070 | return cookie; |
| 5071 | |
| 5072 | /* |
| 5073 | * cookie contained garbage; fall back to search and return the |
| 5074 | * correct cookie value. |
| 5075 | */ |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 5076 | return __kvm_io_bus_write(vcpu, bus, &range, val); |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 5077 | } |
| 5078 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 5079 | static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, |
| 5080 | struct kvm_io_range *range, void *val) |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 5081 | { |
| 5082 | int idx; |
| 5083 | |
| 5084 | idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 5085 | if (idx < 0) |
| 5086 | return -EOPNOTSUPP; |
| 5087 | |
| 5088 | while (idx < bus->dev_count && |
Paolo Bonzini | c21fbff | 2013-08-27 15:41:41 +0200 | [diff] [blame] | 5089 | kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 5090 | if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 5091 | range->len, val)) |
| 5092 | return idx; |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 5093 | idx++; |
| 5094 | } |
| 5095 | |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 5096 | return -EOPNOTSUPP; |
| 5097 | } |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 5098 | |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 5099 | /* kvm_io_bus_read - called under kvm->slots_lock */ |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 5100 | int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 5101 | int len, void *val) |
Michael S. Tsirkin | bda9020 | 2009-06-29 22:24:32 +0300 | [diff] [blame] | 5102 | { |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 5103 | struct kvm_io_bus *bus; |
| 5104 | struct kvm_io_range range; |
| 5105 | int r; |
| 5106 | |
| 5107 | range = (struct kvm_io_range) { |
| 5108 | .addr = addr, |
| 5109 | .len = len, |
| 5110 | }; |
| 5111 | |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 5112 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 5113 | if (!bus) |
| 5114 | return -ENOMEM; |
Nikolay Nikolaev | e32edf4 | 2015-03-26 14:39:28 +0000 | [diff] [blame] | 5115 | r = __kvm_io_bus_read(vcpu, bus, &range, val); |
Cornelia Huck | 126a5af | 2013-07-03 16:30:53 +0200 | [diff] [blame] | 5116 | return r < 0 ? r : 0; |
| 5117 | } |
| 5118 | |
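| | /*
| | * Buses are updated copy-on-write: allocate a new bus with room for one
| | * more range, insert the device in sorted order, publish the new bus
| | * with rcu_assign_pointer(), and free the old one once
| | * synchronize_srcu_expedited() guarantees no reader still sees it.
| | */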
Marcelo Tosatti | 79fac95 | 2009-12-23 14:35:26 -0200 | [diff] [blame] | 5119 | /* Caller must hold slots_lock. */ |
Sasha Levin | 743eeb0 | 2011-07-27 16:00:48 +0300 | [diff] [blame] | 5120 | int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, |
| 5121 | int len, struct kvm_io_device *dev) |
Michael S. Tsirkin | 6c47469 | 2009-06-29 22:24:26 +0300 | [diff] [blame] | 5122 | { |
Gal Hammer | d4c67a7 | 2018-01-16 15:34:41 +0200 | [diff] [blame] | 5123 | int i; |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 5124 | struct kvm_io_bus *new_bus, *bus; |
Gal Hammer | d4c67a7 | 2018-01-16 15:34:41 +0200 | [diff] [blame] | 5125 | struct kvm_io_range range; |
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 5126 | |
Christian Borntraeger | 4a12f95 | 2017-07-07 10:51:38 +0200 | [diff] [blame] | 5127 | bus = kvm_get_bus(kvm, bus_idx); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 5128 | if (!bus) |
| 5129 | return -ENOMEM; |
| 5130 | |
Amos Kong | 6ea34c9 | 2013-05-25 06:44:15 +0800 | [diff] [blame] | 5131 | /* Exclude ioeventfds from the limit; they are already bounded by the maximum number of fds. */
| 5132 | if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) |
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 5133 | return -ENOSPC; |
| 5134 | |
Gustavo A. R. Silva | 90952cd | 2019-01-30 17:07:47 +0100 | [diff] [blame] | 5135 | new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1), |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 5136 | GFP_KERNEL_ACCOUNT); |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 5137 | if (!new_bus) |
| 5138 | return -ENOMEM; |
Gal Hammer | d4c67a7 | 2018-01-16 15:34:41 +0200 | [diff] [blame] | 5139 | |
| 5140 | range = (struct kvm_io_range) { |
| 5141 | .addr = addr, |
| 5142 | .len = len, |
| 5143 | .dev = dev, |
| 5144 | }; |
| 5145 | |
| 5146 | for (i = 0; i < bus->dev_count; i++) |
| 5147 | if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) |
| 5148 | break; |
| 5149 | |
| 5150 | memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); |
| 5151 | new_bus->dev_count++; |
| 5152 | new_bus->range[i] = range; |
| 5153 | memcpy(new_bus->range + i + 1, bus->range + i, |
| 5154 | (bus->dev_count - i) * sizeof(struct kvm_io_range)); |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 5155 | rcu_assign_pointer(kvm->buses[bus_idx], new_bus); |
| 5156 | synchronize_srcu_expedited(&kvm->srcu); |
| 5157 | kfree(bus); |
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 5158 | |
| 5159 | return 0; |
| 5160 | } |
| 5161 | |
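| | /*
| | * Remove a device with the same copy-on-write scheme as registration.
| | * If the smaller replacement bus cannot be allocated, a NULL bus is
| | * installed and every other device on the old bus is destroyed.
| | */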
Sean Christopherson | 5d3c4c793 | 2021-04-12 15:20:49 -0700 | [diff] [blame] | 5162 | int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
| 5163 | struct kvm_io_device *dev) |
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 5164 | { |
Rustam Kovhaev | f658866 | 2020-09-07 11:55:35 -0700 | [diff] [blame] | 5165 | int i, j; |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 5166 | struct kvm_io_bus *new_bus, *bus; |
Michael S. Tsirkin | 6c47469 | 2009-06-29 22:24:26 +0300 | [diff] [blame] | 5167 | |
Sean Christopherson | 7c896d3 | 2021-04-12 15:20:50 -0700 | [diff] [blame] | 5168 | lockdep_assert_held(&kvm->slots_lock); |
| 5169 | |
Christian Borntraeger | 4a12f95 | 2017-07-07 10:51:38 +0200 | [diff] [blame] | 5170 | bus = kvm_get_bus(kvm, bus_idx); |
Peter Xu | df630b8 | 2017-03-15 16:01:17 +0800 | [diff] [blame] | 5171 | if (!bus) |
Sean Christopherson | 5d3c4c793 | 2021-04-12 15:20:49 -0700 | [diff] [blame] | 5172 | return 0; |
Peter Xu | df630b8 | 2017-03-15 16:01:17 +0800 | [diff] [blame] | 5173 | |
Sean Christopherson | 7c896d3 | 2021-04-12 15:20:50 -0700 | [diff] [blame] | 5174 | for (i = 0; i < bus->dev_count; i++) { |
Amos Kong | a1300716 | 2012-03-09 12:17:32 +0800 | [diff] [blame] | 5175 | if (bus->range[i].dev == dev) { |
Gregory Haskins | 090b7af | 2009-07-07 17:08:44 -0400 | [diff] [blame] | 5176 | break; |
| 5177 | } |
Sean Christopherson | 7c896d3 | 2021-04-12 15:20:50 -0700 | [diff] [blame] | 5178 | } |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 5179 | |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 5180 | if (i == bus->dev_count) |
Sean Christopherson | 5d3c4c793 | 2021-04-12 15:20:49 -0700 | [diff] [blame] | 5181 | return 0; |
Amos Kong | a1300716 | 2012-03-09 12:17:32 +0800 | [diff] [blame] | 5182 | |
Gustavo A. R. Silva | 90952cd | 2019-01-30 17:07:47 +0100 | [diff] [blame] | 5183 | new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 5184 | GFP_KERNEL_ACCOUNT); |
Rustam Kovhaev | f658866 | 2020-09-07 11:55:35 -0700 | [diff] [blame] | 5185 | if (new_bus) { |
Rustam Kovhaev | 871c433 | 2020-09-18 05:05:00 -0700 | [diff] [blame] | 5186 | memcpy(new_bus, bus, struct_size(bus, range, i)); |
Rustam Kovhaev | f658866 | 2020-09-07 11:55:35 -0700 | [diff] [blame] | 5187 | new_bus->dev_count--; |
| 5188 | memcpy(new_bus->range + i, bus->range + i + 1, |
Rustam Kovhaev | 871c433 | 2020-09-18 05:05:00 -0700 | [diff] [blame] | 5189 | flex_array_size(new_bus, range, new_bus->dev_count - i)); |
Sean Christopherson | 2ee3757 | 2021-04-12 15:20:48 -0700 | [diff] [blame] | 5190 | } |
| 5191 | |
| 5192 | rcu_assign_pointer(kvm->buses[bus_idx], new_bus); |
| 5193 | synchronize_srcu_expedited(&kvm->srcu); |
| 5194 | |
| 5195 | /* Destroy the old bus _after_ installing the (null) bus. */ |
| 5196 | if (!new_bus) { |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 5197 | pr_err("kvm: failed to shrink bus, removing it completely\n"); |
Rustam Kovhaev | f658866 | 2020-09-07 11:55:35 -0700 | [diff] [blame] | 5198 | for (j = 0; j < bus->dev_count; j++) { |
| 5199 | if (j == i) |
| 5200 | continue; |
| 5201 | kvm_iodevice_destructor(bus->range[j].dev); |
| 5202 | } |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 5203 | } |
Amos Kong | a1300716 | 2012-03-09 12:17:32 +0800 | [diff] [blame] | 5204 | |
Marcelo Tosatti | e93f8a0 | 2009-12-23 14:35:24 -0200 | [diff] [blame] | 5205 | kfree(bus); |
Sean Christopherson | 5d3c4c793 | 2021-04-12 15:20:49 -0700 | [diff] [blame] | 5206 | return new_bus ? 0 : -ENOMEM; |
Gregory Haskins | 2eeb2e9 | 2007-05-31 14:08:53 -0400 | [diff] [blame] | 5207 | } |
| 5208 | |
Andre Przywara | 8a39d00 | 2016-07-15 12:43:26 +0100 | [diff] [blame] | 5209 | struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
| 5210 | gpa_t addr) |
| 5211 | { |
| 5212 | struct kvm_io_bus *bus; |
| 5213 | int dev_idx, srcu_idx; |
| 5214 | struct kvm_io_device *iodev = NULL; |
| 5215 | |
| 5216 | srcu_idx = srcu_read_lock(&kvm->srcu); |
| 5217 | |
| 5218 | bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); |
David Hildenbrand | 90db104 | 2017-03-23 18:24:19 +0100 | [diff] [blame] | 5219 | if (!bus) |
| 5220 | goto out_unlock; |
Andre Przywara | 8a39d00 | 2016-07-15 12:43:26 +0100 | [diff] [blame] | 5221 | |
| 5222 | dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); |
| 5223 | if (dev_idx < 0) |
| 5224 | goto out_unlock; |
| 5225 | |
| 5226 | iodev = bus->range[dev_idx].dev; |
| 5227 | |
| 5228 | out_unlock: |
| 5229 | srcu_read_unlock(&kvm->srcu, srcu_idx); |
| 5230 | |
| 5231 | return iodev; |
| 5232 | } |
| 5233 | EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); |
| 5234 | |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5235 | static int kvm_debugfs_open(struct inode *inode, struct file *file, |
| 5236 | int (*get)(void *, u64 *), int (*set)(void *, u64), |
| 5237 | const char *fmt) |
| 5238 | { |
| 5239 | struct kvm_stat_data *stat_data = (struct kvm_stat_data *) |
| 5240 | inode->i_private; |
| 5241 | |
Peter Xu | 605c713 | 2021-06-25 11:32:07 -0400 | [diff] [blame] | 5242 | /* |
| 5243 | * The debugfs files hold a reference to the kvm struct, which
| 5244 | * is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe()
| 5245 | * avoids the race between open and the removal of the debugfs directory.
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5246 | */ |
Peter Xu | 605c713 | 2021-06-25 11:32:07 -0400 | [diff] [blame] | 5247 | if (!kvm_get_kvm_safe(stat_data->kvm)) |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5248 | return -ENOENT; |
| 5249 | |
Paolo Bonzini | 833b45d | 2019-09-30 18:48:44 +0200 | [diff] [blame] | 5250 | if (simple_attr_open(inode, file, get, |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5251 | kvm_stats_debugfs_mode(stat_data->desc) & 0222 |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5252 | ? set : NULL, |
| 5253 | fmt)) { |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5254 | kvm_put_kvm(stat_data->kvm); |
| 5255 | return -ENOMEM; |
| 5256 | } |
| 5257 | |
| 5258 | return 0; |
| 5259 | } |
| 5260 | |
| 5261 | static int kvm_debugfs_release(struct inode *inode, struct file *file) |
| 5262 | { |
| 5263 | struct kvm_stat_data *stat_data = (struct kvm_stat_data *) |
| 5264 | inode->i_private; |
| 5265 | |
| 5266 | simple_attr_release(inode, file); |
| 5267 | kvm_put_kvm(stat_data->kvm); |
| 5268 | |
| 5269 | return 0; |
| 5270 | } |
| 5271 | |
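| | /*
| | * The per-VM and per-vCPU stat accessors work on raw byte offsets into
| | * kvm->stat / vcpu->stat; the offset comes from the stats descriptor
| | * bound to the debugfs file.
| | */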
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5272 | static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5273 | { |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5274 | *val = *(u64 *)((void *)(&kvm->stat) + offset); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5275 | |
| 5276 | return 0; |
| 5277 | } |
| 5278 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5279 | static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5280 | { |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5281 | *(u64 *)((void *)(&kvm->stat) + offset) = 0; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5282 | |
| 5283 | return 0; |
| 5284 | } |
| 5285 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5286 | static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5287 | { |
Marc Zyngier | 46808a4 | 2021-11-16 16:04:02 +0000 | [diff] [blame] | 5288 | unsigned long i; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5289 | struct kvm_vcpu *vcpu; |
| 5290 | |
| 5291 | *val = 0; |
| 5292 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5293 | kvm_for_each_vcpu(i, vcpu, kvm) |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5294 | *val += *(u64 *)((void *)(&vcpu->stat) + offset); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5295 | |
| 5296 | return 0; |
| 5297 | } |
| 5298 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5299 | static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5300 | { |
Marc Zyngier | 46808a4 | 2021-11-16 16:04:02 +0000 | [diff] [blame] | 5301 | unsigned long i; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5302 | struct kvm_vcpu *vcpu; |
| 5303 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5304 | kvm_for_each_vcpu(i, vcpu, kvm) |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5305 | *(u64 *)((void *)(&vcpu->stat) + offset) = 0; |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5306 | |
| 5307 | return 0; |
| 5308 | } |
| 5309 | |
| 5310 | static int kvm_stat_data_get(void *data, u64 *val) |
| 5311 | { |
| 5312 | int r = -EFAULT; |
| 5313 | struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; |
| 5314 | |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5315 | switch (stat_data->kind) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5316 | case KVM_STAT_VM: |
| 5317 | r = kvm_get_stat_per_vm(stat_data->kvm, |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5318 | stat_data->desc->desc.offset, val); |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5319 | break; |
| 5320 | case KVM_STAT_VCPU: |
| 5321 | r = kvm_get_stat_per_vcpu(stat_data->kvm, |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5322 | stat_data->desc->desc.offset, val); |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5323 | break; |
| 5324 | } |
| 5325 | |
| 5326 | return r; |
| 5327 | } |
| 5328 | |
| 5329 | static int kvm_stat_data_clear(void *data, u64 val) |
| 5330 | { |
| 5331 | int r = -EFAULT; |
| 5332 | struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; |
| 5333 | |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5334 | if (val) |
| 5335 | return -EINVAL; |
| 5336 | |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5337 | switch (stat_data->kind) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5338 | case KVM_STAT_VM: |
| 5339 | r = kvm_clear_stat_per_vm(stat_data->kvm, |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5340 | stat_data->desc->desc.offset); |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5341 | break; |
| 5342 | case KVM_STAT_VCPU: |
| 5343 | r = kvm_clear_stat_per_vcpu(stat_data->kvm, |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5344 | stat_data->desc->desc.offset); |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5345 | break; |
| 5346 | } |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5347 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5348 | return r; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5349 | } |
| 5350 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5351 | static int kvm_stat_data_open(struct inode *inode, struct file *file) |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5352 | { |
| 5353 | __simple_attr_check_format("%llu\n", 0ull); |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5354 | return kvm_debugfs_open(inode, file, kvm_stat_data_get, |
| 5355 | kvm_stat_data_clear, "%llu\n"); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5356 | } |
| 5357 | |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5358 | static const struct file_operations stat_fops_per_vm = { |
| 5359 | .owner = THIS_MODULE, |
| 5360 | .open = kvm_stat_data_open, |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5361 | .release = kvm_debugfs_release, |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5362 | .read = simple_attr_read, |
| 5363 | .write = simple_attr_write, |
| 5364 | .llseek = no_llseek, |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5365 | }; |
| 5366 | |
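| | /*
| | * Global debugfs stats: sum (or clear) the stat at the given offset
| | * across every VM on vm_list, under kvm_lock to keep the list stable.
| | */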
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 5367 | static int vm_stat_get(void *_offset, u64 *val) |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 5368 | { |
| 5369 | unsigned offset = (long)_offset; |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 5370 | struct kvm *kvm; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5371 | u64 tmp_val; |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 5372 | |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 5373 | *val = 0; |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5374 | mutex_lock(&kvm_lock); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5375 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5376 | kvm_get_stat_per_vm(kvm, offset, &tmp_val); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5377 | *val += tmp_val; |
| 5378 | } |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5379 | mutex_unlock(&kvm_lock); |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 5380 | return 0; |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 5381 | } |
| 5382 | |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5383 | static int vm_stat_clear(void *_offset, u64 val) |
| 5384 | { |
| 5385 | unsigned offset = (long)_offset; |
| 5386 | struct kvm *kvm; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5387 | |
| 5388 | if (val) |
| 5389 | return -EINVAL; |
| 5390 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5391 | mutex_lock(&kvm_lock); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5392 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5393 | kvm_clear_stat_per_vm(kvm, offset); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5394 | } |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5395 | mutex_unlock(&kvm_lock); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5396 | |
| 5397 | return 0; |
| 5398 | } |
| 5399 | |
| 5400 | DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5401 | DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n"); |
Avi Kivity | ba1389b | 2007-11-18 16:24:12 +0200 | [diff] [blame] | 5402 | |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 5403 | static int vcpu_stat_get(void *_offset, u64 *val) |
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 5404 | { |
| 5405 | unsigned offset = (long)_offset; |
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 5406 | struct kvm *kvm; |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5407 | u64 tmp_val; |
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 5408 | |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 5409 | *val = 0; |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5410 | mutex_lock(&kvm_lock); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5411 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5412 | kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); |
Janosch Frank | 536a6f8 | 2016-05-18 13:26:23 +0200 | [diff] [blame] | 5413 | *val += tmp_val; |
| 5414 | } |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5415 | mutex_unlock(&kvm_lock); |
Christoph Hellwig | 8b88b09 | 2008-02-08 04:20:26 -0800 | [diff] [blame] | 5416 | return 0; |
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 5417 | } |
| 5418 | |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5419 | static int vcpu_stat_clear(void *_offset, u64 val) |
| 5420 | { |
| 5421 | unsigned offset = (long)_offset; |
| 5422 | struct kvm *kvm; |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5423 | |
| 5424 | if (val) |
| 5425 | return -EINVAL; |
| 5426 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5427 | mutex_lock(&kvm_lock); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5428 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Milan Pandurov | 09cbcef | 2019-12-13 14:07:21 +0100 | [diff] [blame] | 5429 | kvm_clear_stat_per_vcpu(kvm, offset); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5430 | } |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5431 | mutex_unlock(&kvm_lock); |
Suraj Jitindar Singh | ce35ef2 | 2016-10-19 13:49:47 +1100 | [diff] [blame] | 5432 | |
| 5433 | return 0; |
| 5434 | } |
| 5435 | |
| 5436 | DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, |
| 5437 | "%llu\n"); |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5438 | DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n"); |
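For reference, DEFINE_SIMPLE_ATTRIBUTE(name, get, set, fmt) from <linux/fs.h> generates an open handler plus a file_operations wired to the simple_attr helpers; approximately (a sketch of the expansion, not quoted verbatim):

/* Approximate expansion of DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, ...). */
static int vm_stat_fops_open(struct inode *inode, struct file *file)
{
	return simple_attr_open(inode, file, vm_stat_get, vm_stat_clear,
				"%llu\n");
}

static const struct file_operations vm_stat_fops = {
	.owner	 = THIS_MODULE,
	.open	 = vm_stat_fops_open,
	.release = simple_attr_release,
	.read	 = simple_attr_read,
	.write	 = simple_attr_write,
	.llseek	 = generic_file_llseek,
};

With a NULL set callback, simple_attr_write() rejects writes, which is what makes the *_readonly_fops variants read-only in practice.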
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 5439 | |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5440 | static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) |
| 5441 | { |
| 5442 | struct kobj_uevent_env *env; |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5443 | unsigned long long created, active; |
| 5444 | |
| 5445 | if (!kvm_dev.this_device || !kvm) |
| 5446 | return; |
| 5447 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5448 | mutex_lock(&kvm_lock); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5449 | if (type == KVM_EVENT_CREATE_VM) { |
| 5450 | kvm_createvm_count++; |
| 5451 | kvm_active_vms++; |
| 5452 | } else if (type == KVM_EVENT_DESTROY_VM) { |
| 5453 | kvm_active_vms--; |
| 5454 | } |
| 5455 | created = kvm_createvm_count; |
| 5456 | active = kvm_active_vms; |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5457 | mutex_unlock(&kvm_lock); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5458 | |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 5459 | env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5460 | if (!env) |
| 5461 | return; |
| 5462 | |
| 5463 | add_uevent_var(env, "CREATED=%llu", created); |
| 5464 | add_uevent_var(env, "COUNT=%llu", active); |
| 5465 | |
Claudio Imbrenda | fdeaf7e | 2017-07-24 13:40:03 +0200 | [diff] [blame] | 5466 | if (type == KVM_EVENT_CREATE_VM) { |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5467 | add_uevent_var(env, "EVENT=create"); |
Claudio Imbrenda | fdeaf7e | 2017-07-24 13:40:03 +0200 | [diff] [blame] | 5468 | kvm->userspace_pid = task_pid_nr(current); |
| 5469 | } else if (type == KVM_EVENT_DESTROY_VM) { |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5470 | add_uevent_var(env, "EVENT=destroy"); |
Claudio Imbrenda | fdeaf7e | 2017-07-24 13:40:03 +0200 | [diff] [blame] | 5471 | } |
| 5472 | add_uevent_var(env, "PID=%d", kvm->userspace_pid); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5473 | |
Paolo Bonzini | 85cd39a | 2021-08-04 05:28:52 -0400 | [diff] [blame] | 5474 | if (kvm->debugfs_dentry) { |
Ben Gardon | b12ce36 | 2019-02-11 11:02:49 -0800 | [diff] [blame] | 5475 | char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5476 | |
Claudio Imbrenda | fdeaf7e | 2017-07-24 13:40:03 +0200 | [diff] [blame] | 5477 | if (p) { |
| 5478 | tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); |
| 5479 | if (!IS_ERR(tmp)) |
| 5480 | add_uevent_var(env, "STATS_PATH=%s", tmp); |
| 5481 | kfree(p); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5482 | } |
| 5483 | } |
| 5484 | /* no overflow checks needed, since we add at most 5 keys */
| 5485 | env->envp[env->envp_idx++] = NULL; |
| 5486 | kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); |
| 5487 | kfree(env); |
Claudio Imbrenda | 286de8f | 2017-07-12 17:56:44 +0200 | [diff] [blame] | 5488 | } |
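The KOBJ_CHANGE event lands on the kernel uevent netlink multicast group, so userspace can watch VM creation and destruction without polling. A minimal listener sketch (standard NETLINK_KOBJECT_UEVENT usage; error handling trimmed):

/* Sketch: print the KVM uevent keys (EVENT=, CREATED=, COUNT=, PID=, ...). */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	char buf[4096];
	int fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;
	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);

		if (len <= 0)
			break;
		buf[len] = '\0';
		/* The payload is a block of NUL-separated KEY=value strings. */
		for (char *p = buf; p < buf + len; p += strlen(p) + 1)
			if (!strncmp(p, "EVENT=", 6) || !strncmp(p, "PID=", 4))
				puts(p);
	}
	close(fd);
	return 0;
}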
| 5489 | |
Greg Kroah-Hartman | 929f45e | 2018-05-29 18:22:04 +0200 | [diff] [blame] | 5490 | static void kvm_init_debug(void) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5491 | { |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5492 | const struct file_operations *fops; |
| 5493 | const struct _kvm_stats_desc *pdesc; |
| 5494 | int i; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5495 | |
Hollis Blanchard | 76f7c87 | 2008-04-15 16:05:42 -0500 | [diff] [blame] | 5496 | kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); |
Hamo | 4f69b68 | 2011-12-15 14:23:16 +0800 | [diff] [blame] | 5497 | |
Jing Zhang | bc9e9e6 | 2021-06-23 17:28:46 -0400 | [diff] [blame] | 5498 | for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { |
| 5499 | pdesc = &kvm_vm_stats_desc[i]; |
| 5500 | if (kvm_stats_debugfs_mode(pdesc) & 0222) |
| 5501 | fops = &vm_stat_fops; |
| 5502 | else |
| 5503 | fops = &vm_stat_readonly_fops; |
| 5504 | debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), |
| 5505 | kvm_debugfs_dir, |
| 5506 | (void *)(long)pdesc->desc.offset, fops); |
| 5507 | } |
| 5508 | |
| 5509 | for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { |
| 5510 | pdesc = &kvm_vcpu_stats_desc[i]; |
| 5511 | if (kvm_stats_debugfs_mode(pdesc) & 0222) |
| 5512 | fops = &vcpu_stat_fops; |
| 5513 | else |
| 5514 | fops = &vcpu_stat_readonly_fops; |
| 5515 | debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), |
| 5516 | kvm_debugfs_dir, |
| 5517 | (void *)(long)pdesc->desc.offset, fops); |
Hamo | 4f69b68 | 2011-12-15 14:23:16 +0800 | [diff] [blame] | 5518 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5519 | } |
| 5520 | |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 5521 | static int kvm_suspend(void) |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 5522 | { |
Alexander Graf | 10474ae | 2009-09-15 11:37:46 +0200 | [diff] [blame] | 5523 | if (kvm_usage_count) |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 5524 | hardware_disable_nolock(NULL); |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 5525 | return 0; |
| 5526 | } |
| 5527 | |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 5528 | static void kvm_resume(void) |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 5529 | { |
Zachary Amsden | ca84d1a | 2010-08-19 22:07:28 -1000 | [diff] [blame] | 5530 | if (kvm_usage_count) { |
Wanpeng Li | 2eb06c3 | 2019-05-17 16:49:49 +0800 | [diff] [blame] | 5531 | #ifdef CONFIG_LOCKDEP |
| 5532 | WARN_ON(lockdep_is_held(&kvm_count_lock)); |
| 5533 | #endif |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 5534 | hardware_enable_nolock(NULL); |
Zachary Amsden | ca84d1a | 2010-08-19 22:07:28 -1000 | [diff] [blame] | 5535 | } |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 5536 | } |
| 5537 | |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 5538 | static struct syscore_ops kvm_syscore_ops = { |
Avi Kivity | 59ae6c6 | 2007-02-12 00:54:48 -0800 | [diff] [blame] | 5539 | .suspend = kvm_suspend, |
| 5540 | .resume = kvm_resume, |
| 5541 | }; |
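syscore_ops callbacks run late in suspend and early in resume, on one CPU with interrupts disabled, which is why kvm_suspend()/kvm_resume() may use the _nolock hardware enable/disable paths. The same registration pattern in a self-contained module (hypothetical demo names):

#include <linux/module.h>
#include <linux/syscore_ops.h>

static int demo_suspend(void)
{
	/* Runs late in suspend, IRQs off, single CPU; must not sleep. */
	return 0;	/* returning non-zero aborts the suspend */
}

static void demo_resume(void)
{
	/* Undo whatever demo_suspend() tore down. */
}

static struct syscore_ops demo_syscore_ops = {
	.suspend = demo_suspend,
	.resume  = demo_resume,
};

static int __init demo_init(void)
{
	register_syscore_ops(&demo_syscore_ops);
	return 0;
}

static void __exit demo_exit(void)
{
	unregister_syscore_ops(&demo_syscore_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");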
| 5542 | |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 5543 | static inline |
| 5544 | struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) |
| 5545 | { |
| 5546 | return container_of(pn, struct kvm_vcpu, preempt_notifier); |
| 5547 | } |
| 5548 | |
| 5549 | static void kvm_sched_in(struct preempt_notifier *pn, int cpu) |
| 5550 | { |
| 5551 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); |
Xiubo Li | f95ef0cd | 2015-02-26 14:58:23 +0800 | [diff] [blame] | 5552 | |
Wanpeng Li | 046ddee | 2019-08-01 11:30:14 +0800 | [diff] [blame] | 5553 | WRITE_ONCE(vcpu->preempted, false); |
Wanpeng Li | d73eb57 | 2019-07-18 19:39:06 +0800 | [diff] [blame] | 5554 | WRITE_ONCE(vcpu->ready, false); |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 5555 | |
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 5556 | __this_cpu_write(kvm_running_vcpu, vcpu); |
Radim Krčmář | e790d9e | 2014-08-21 18:08:05 +0200 | [diff] [blame] | 5557 | kvm_arch_sched_in(vcpu, cpu); |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 5558 | kvm_arch_vcpu_load(vcpu, cpu); |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 5559 | } |
| 5560 | |
| 5561 | static void kvm_sched_out(struct preempt_notifier *pn, |
| 5562 | struct task_struct *next) |
| 5563 | { |
| 5564 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); |
| 5565 | |
Peter Zijlstra | 3ba9f93 | 2021-06-11 10:28:13 +0200 | [diff] [blame] | 5566 | if (current->on_rq) { |
Wanpeng Li | 046ddee | 2019-08-01 11:30:14 +0800 | [diff] [blame] | 5567 | WRITE_ONCE(vcpu->preempted, true); |
Wanpeng Li | d73eb57 | 2019-07-18 19:39:06 +0800 | [diff] [blame] | 5568 | WRITE_ONCE(vcpu->ready, true); |
| 5569 | } |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 5570 | kvm_arch_vcpu_put(vcpu); |
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 5571 | __this_cpu_write(kvm_running_vcpu, NULL); |
| 5572 | } |
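These handlers fire only for tasks that registered a preempt notifier. Elsewhere in this file the notifier is initialized once per vCPU and armed/disarmed around vcpu_load()/vcpu_put(); schematically (a simplified sketch of that wiring, not a verbatim excerpt):

/* vCPU creation: bind the notifier to kvm_preempt_ops (set up in kvm_init). */
preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

/* vcpu_load(): start receiving sched_in/sched_out callbacks ... */
preempt_notifier_register(&vcpu->preempt_notifier);

/* ... and vcpu_put(): stop receiving them. */
preempt_notifier_unregister(&vcpu->preempt_notifier);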
| 5573 | |
| 5574 | /** |
| 5575 | * kvm_get_running_vcpu - get the vcpu running on the current CPU. |
Marc Zyngier | 1f03b2b | 2020-02-07 16:34:10 +0000 | [diff] [blame] | 5576 | * |
| 5577 | * It is safe to disable preemption only around the per-CPU access and
| 5578 | * keep using the resolved vcpu pointer after preemption is re-enabled:
| 5579 | * even if the current thread migrates to another CPU, the preempt
| 5580 | * notifier handlers update the per-CPU variable on every switch, so a
| 5581 | * later read would return the same vcpu.
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 5582 | */ |
| 5583 | struct kvm_vcpu *kvm_get_running_vcpu(void) |
| 5584 | { |
Marc Zyngier | 1f03b2b | 2020-02-07 16:34:10 +0000 | [diff] [blame] | 5585 | struct kvm_vcpu *vcpu; |
| 5586 | |
| 5587 | preempt_disable(); |
| 5588 | vcpu = __this_cpu_read(kvm_running_vcpu); |
| 5589 | preempt_enable(); |
| 5590 | |
| 5591 | return vcpu; |
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 5592 | } |
Wanpeng Li | 379a3c8 | 2020-04-28 14:23:27 +0800 | [diff] [blame] | 5593 | EXPORT_SYMBOL_GPL(kvm_get_running_vcpu); |
Paolo Bonzini | 7495e22 | 2020-01-09 09:57:19 -0500 | [diff] [blame] | 5594 | |
| 5595 | /** |
| 5596 | * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus. |
| 5597 | */ |
| 5598 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) |
| 5599 | { |
| 5600 | return &kvm_running_vcpu; |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 5601 | } |
| 5602 | |
Sean Christopherson | e1bfc24 | 2021-11-11 02:07:33 +0000 | [diff] [blame] | 5603 | #ifdef CONFIG_GUEST_PERF_EVENTS |
| 5604 | static unsigned int kvm_guest_state(void) |
| 5605 | { |
| 5606 | struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); |
| 5607 | unsigned int state; |
| 5608 | |
| 5609 | if (!kvm_arch_pmi_in_guest(vcpu)) |
| 5610 | return 0; |
| 5611 | |
| 5612 | state = PERF_GUEST_ACTIVE; |
| 5613 | if (!kvm_arch_vcpu_in_kernel(vcpu)) |
| 5614 | state |= PERF_GUEST_USER; |
| 5615 | |
| 5616 | return state; |
| 5617 | } |
| 5618 | |
| 5619 | static unsigned long kvm_guest_get_ip(void) |
| 5620 | { |
| 5621 | struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); |
| 5622 | |
| 5623 | /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */ |
| 5624 | if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu))) |
| 5625 | return 0; |
| 5626 | |
| 5627 | return kvm_arch_vcpu_get_ip(vcpu); |
| 5628 | } |
| 5629 | |
| 5630 | static struct perf_guest_info_callbacks kvm_guest_cbs = { |
| 5631 | .state = kvm_guest_state, |
| 5632 | .get_ip = kvm_guest_get_ip, |
| 5633 | .handle_intel_pt_intr = NULL, |
| 5634 | }; |
| 5635 | |
| 5636 | void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void)) |
| 5637 | { |
| 5638 | kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler; |
| 5639 | perf_register_guest_info_callbacks(&kvm_guest_cbs); |
| 5640 | } |
| 5641 | void kvm_unregister_perf_callbacks(void) |
| 5642 | { |
| 5643 | perf_unregister_guest_info_callbacks(&kvm_guest_cbs); |
| 5644 | } |
| 5645 | #endif |
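An architecture opts in from its init path by handing over an optional Intel PT PMI handler; x86, for example, passes its PT interrupt callback through. Schematically (sketch; 'arch_ops' is a stand-in for the arch's real ops structure):

/* Arch init: route perf guest-state queries and PT PMIs to KVM. */
kvm_register_perf_callbacks(arch_ops->handle_intel_pt_intr);

/* Arch teardown: */
kvm_unregister_perf_callbacks();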
| 5646 | |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 5647 | struct kvm_cpu_compat_check { |
| 5648 | void *opaque; |
| 5649 | int *ret; |
| 5650 | }; |
| 5651 | |
| 5652 | static void check_processor_compat(void *data) |
Sean Christopherson | f257d6d | 2019-04-19 22:18:17 -0700 | [diff] [blame] | 5653 | { |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 5654 | struct kvm_cpu_compat_check *c = data; |
| 5655 | |
| 5656 | *c->ret = kvm_arch_check_processor_compat(c->opaque); |
Sean Christopherson | f257d6d | 2019-04-19 22:18:17 -0700 | [diff] [blame] | 5657 | } |
| 5658 | |
Avi Kivity | 0ee75be | 2010-04-28 15:39:01 +0300 | [diff] [blame] | 5659 | int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 5660 | struct module *module) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5661 | { |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 5662 | struct kvm_cpu_compat_check c; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5663 | int r; |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 5664 | int cpu; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5665 | |
Zhang Xiantao | f8c16bb | 2007-11-14 20:40:21 +0800 | [diff] [blame] | 5666 | r = kvm_arch_init(opaque); |
| 5667 | if (r) |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 5668 | goto out_fail; |
Zhang Xiantao | cb498ea | 2007-11-14 20:39:31 +0800 | [diff] [blame] | 5669 | |
Asias He | 7dac16c | 2013-05-08 10:57:29 +0800 | [diff] [blame] | 5670 | /* |
| 5671 | * kvm_arch_init() makes sure there's at most one caller
| 5672 | * for architectures that support multiple implementations,
| 5673 | * like Intel and AMD on x86.
Paolo Bonzini | 36343f6 | 2016-10-26 13:35:56 +0200 | [diff] [blame] | 5674 | * kvm_arch_init() must be called before kvm_irqfd_init() to avoid creating
| 5675 | * conflicts in case kvm is already set up for another implementation.
Asias He | 7dac16c | 2013-05-08 10:57:29 +0800 | [diff] [blame] | 5676 | */ |
Paolo Bonzini | 36343f6 | 2016-10-26 13:35:56 +0200 | [diff] [blame] | 5677 | r = kvm_irqfd_init(); |
| 5678 | if (r) |
| 5679 | goto out_irqfd; |
Asias He | 7dac16c | 2013-05-08 10:57:29 +0800 | [diff] [blame] | 5680 | |
Avi Kivity | 8437a617 | 2009-06-06 14:52:35 -0700 | [diff] [blame] | 5681 | if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 5682 | r = -ENOMEM; |
| 5683 | goto out_free_0; |
| 5684 | } |
| 5685 | |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 5686 | r = kvm_arch_hardware_setup(opaque); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5687 | if (r < 0) |
Miaohe Lin | faf0be2 | 2019-11-23 10:45:50 +0800 | [diff] [blame] | 5688 | goto out_free_1; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5689 | |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 5690 | c.ret = &r; |
| 5691 | c.opaque = opaque; |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 5692 | for_each_online_cpu(cpu) { |
Sean Christopherson | b990408 | 2020-03-21 13:25:55 -0700 | [diff] [blame] | 5693 | smp_call_function_single(cpu, check_processor_compat, &c, 1); |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 5694 | if (r < 0) |
Miaohe Lin | faf0be2 | 2019-11-23 10:45:50 +0800 | [diff] [blame] | 5695 | goto out_free_2; |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 5696 | } |
| 5697 | |
Thomas Gleixner | 73c1b41 | 2016-12-21 20:19:54 +0100 | [diff] [blame] | 5698 | r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting", |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 5699 | kvm_starting_cpu, kvm_dying_cpu); |
Avi Kivity | 774c47f | 2007-02-12 00:54:47 -0800 | [diff] [blame] | 5700 | if (r) |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 5701 | goto out_free_2; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5702 | register_reboot_notifier(&kvm_reboot_notifier); |
| 5703 | |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 5704 | /* A kmem cache lets us meet the alignment requirements of fx_save. */ |
Avi Kivity | 0ee75be | 2010-04-28 15:39:01 +0300 | [diff] [blame] | 5705 | if (!vcpu_align) |
| 5706 | vcpu_align = __alignof__(struct kvm_vcpu); |
Paolo Bonzini | 4651573 | 2017-10-26 15:45:46 +0200 | [diff] [blame] | 5707 | kvm_vcpu_cache = |
| 5708 | kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align, |
| 5709 | SLAB_ACCOUNT, |
| 5710 | offsetof(struct kvm_vcpu, arch), |
Jing Zhang | ce55c04 | 2021-06-18 22:27:06 +0000 | [diff] [blame] | 5711 | offsetofend(struct kvm_vcpu, stats_id) |
| 5712 | - offsetof(struct kvm_vcpu, arch), |
Paolo Bonzini | 4651573 | 2017-10-26 15:45:46 +0200 | [diff] [blame] | 5713 | NULL); |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 5714 | if (!kvm_vcpu_cache) { |
| 5715 | r = -ENOMEM; |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 5716 | goto out_free_3; |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 5717 | } |
| 5718 | |
Vitaly Kuznetsov | baff59c | 2021-09-03 09:51:40 +0200 | [diff] [blame] | 5719 | for_each_possible_cpu(cpu) { |
| 5720 | if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu), |
| 5721 | GFP_KERNEL, cpu_to_node(cpu))) { |
| 5722 | r = -ENOMEM; |
| 5723 | goto out_free_4; |
| 5724 | } |
| 5725 | } |
| 5726 | |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 5727 | r = kvm_async_pf_init(); |
| 5728 | if (r) |
Vitaly Kuznetsov | baff59c | 2021-09-03 09:51:40 +0200 | [diff] [blame] | 5729 | goto out_free_5; |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 5730 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5731 | kvm_chardev_ops.owner = module; |
Christian Borntraeger | 3d3aab1 | 2008-12-02 11:17:32 +0100 | [diff] [blame] | 5732 | kvm_vm_fops.owner = module; |
| 5733 | kvm_vcpu_fops.owner = module; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5734 | |
| 5735 | r = misc_register(&kvm_dev); |
| 5736 | if (r) { |
Xiubo Li | 1170adc | 2015-02-26 14:58:26 +0800 | [diff] [blame] | 5737 | pr_err("kvm: misc device register failed\n"); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 5738 | goto out_unreg; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5739 | } |
| 5740 | |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 5741 | register_syscore_ops(&kvm_syscore_ops); |
| 5742 | |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 5743 | kvm_preempt_ops.sched_in = kvm_sched_in; |
| 5744 | kvm_preempt_ops.sched_out = kvm_sched_out; |
| 5745 | |
Greg Kroah-Hartman | 929f45e | 2018-05-29 18:22:04 +0200 | [diff] [blame] | 5746 | kvm_init_debug(); |
Darrick J. Wong | 0ea4ed8 | 2009-10-14 16:21:00 -0700 | [diff] [blame] | 5747 | |
Paolo Bonzini | 3c3c29f | 2014-09-24 13:02:46 +0200 | [diff] [blame] | 5748 | r = kvm_vfio_ops_init(); |
| 5749 | WARN_ON(r); |
| 5750 | |
Avi Kivity | c7addb9 | 2007-09-16 18:58:32 +0200 | [diff] [blame] | 5751 | return 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5752 | |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 5753 | out_unreg: |
| 5754 | kvm_async_pf_deinit(); |
Vitaly Kuznetsov | baff59c | 2021-09-03 09:51:40 +0200 | [diff] [blame] | 5755 | out_free_5: |
| 5756 | for_each_possible_cpu(cpu) |
| 5757 | free_cpumask_var(per_cpu(cpu_kick_mask, cpu)); |
| 5758 | out_free_4: |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 5759 | kmem_cache_destroy(kvm_vcpu_cache); |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 5760 | out_free_3: |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5761 | unregister_reboot_notifier(&kvm_reboot_notifier); |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 5762 | cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 5763 | out_free_2: |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 5764 | kvm_arch_hardware_unsetup(); |
Miaohe Lin | faf0be2 | 2019-11-23 10:45:50 +0800 | [diff] [blame] | 5765 | out_free_1: |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 5766 | free_cpumask_var(cpus_hardware_enabled); |
Zhang Xiantao | d2308784 | 2007-11-29 15:35:39 +0800 | [diff] [blame] | 5767 | out_free_0: |
Cornelia Huck | a0f155e | 2013-02-28 12:33:18 +0100 | [diff] [blame] | 5768 | kvm_irqfd_exit(); |
Paolo Bonzini | 36343f6 | 2016-10-26 13:35:56 +0200 | [diff] [blame] | 5769 | out_irqfd: |
Asias He | 7dac16c | 2013-05-08 10:57:29 +0800 | [diff] [blame] | 5770 | kvm_arch_exit(); |
| 5771 | out_fail: |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5772 | return r; |
| 5773 | } |
Zhang Xiantao | cb498ea | 2007-11-14 20:39:31 +0800 | [diff] [blame] | 5774 | EXPORT_SYMBOL_GPL(kvm_init); |
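Each architecture module invokes kvm_init() from its module_init hook, passing vcpu_size/vcpu_align for its containing vCPU structure so the kmem cache created above fits it. Roughly how the x86/VMX call site looks (a simplified sketch, not the full init sequence):

static int __init vmx_init(void)
{
	int r;

	/* struct vcpu_vmx embeds struct kvm_vcpu, hence the sizing args. */
	r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
		     __alignof__(struct vcpu_vmx), THIS_MODULE);
	if (r)
		return r;

	/* ... VMX-specific setup would continue here ... */
	return 0;
}
module_init(vmx_init);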
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5775 | |
Zhang Xiantao | cb498ea | 2007-11-14 20:39:31 +0800 | [diff] [blame] | 5776 | void kvm_exit(void) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5777 | { |
Vitaly Kuznetsov | baff59c | 2021-09-03 09:51:40 +0200 | [diff] [blame] | 5778 | int cpu; |
| 5779 | |
Janosch Frank | 4bd33b5 | 2015-10-14 12:37:35 +0200 | [diff] [blame] | 5780 | debugfs_remove_recursive(kvm_debugfs_dir); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5781 | misc_deregister(&kvm_dev); |
Vitaly Kuznetsov | baff59c | 2021-09-03 09:51:40 +0200 | [diff] [blame] | 5782 | for_each_possible_cpu(cpu) |
| 5783 | free_cpumask_var(per_cpu(cpu_kick_mask, cpu)); |
Rusty Russell | c16f862 | 2007-07-30 21:12:19 +1000 | [diff] [blame] | 5784 | kmem_cache_destroy(kvm_vcpu_cache); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 5785 | kvm_async_pf_deinit(); |
Rafael J. Wysocki | fb3600c | 2011-03-23 22:16:23 +0100 | [diff] [blame] | 5786 | unregister_syscore_ops(&kvm_syscore_ops); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5787 | unregister_reboot_notifier(&kvm_reboot_notifier); |
Thomas Gleixner | 8c18b2d | 2016-07-13 17:16:37 +0000 | [diff] [blame] | 5788 | cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); |
Takuya Yoshikawa | 75b7127 | 2010-11-16 17:37:41 +0900 | [diff] [blame] | 5789 | on_each_cpu(hardware_disable_nolock, NULL, 1); |
Zhang Xiantao | e9b11c1 | 2007-11-14 20:38:21 +0800 | [diff] [blame] | 5790 | kvm_arch_hardware_unsetup(); |
Zhang Xiantao | f8c16bb | 2007-11-14 20:40:21 +0800 | [diff] [blame] | 5791 | kvm_arch_exit(); |
Cornelia Huck | a0f155e | 2013-02-28 12:33:18 +0100 | [diff] [blame] | 5792 | kvm_irqfd_exit(); |
Rusty Russell | 7f59f49 | 2008-12-07 21:25:45 +1030 | [diff] [blame] | 5793 | free_cpumask_var(cpus_hardware_enabled); |
Wanpeng Li | 571ee1b | 2014-10-09 18:30:08 +0800 | [diff] [blame] | 5794 | kvm_vfio_ops_exit(); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5795 | } |
Zhang Xiantao | cb498ea | 2007-11-14 20:39:31 +0800 | [diff] [blame] | 5796 | EXPORT_SYMBOL_GPL(kvm_exit); |
Junaid Shahid | c57c804 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 5797 | |
| 5798 | struct kvm_vm_worker_thread_context { |
| 5799 | struct kvm *kvm; |
| 5800 | struct task_struct *parent; |
| 5801 | struct completion init_done; |
| 5802 | kvm_vm_thread_fn_t thread_fn; |
| 5803 | uintptr_t data; |
| 5804 | int err; |
| 5805 | }; |
| 5806 | |
| 5807 | static int kvm_vm_worker_thread(void *context) |
| 5808 | { |
| 5809 | /* |
| 5810 | * The init_context is allocated on the stack of the parent thread, so |
| 5811 | * we have to copy locally anything that is needed beyond initialization.
| 5812 | */ |
| 5813 | struct kvm_vm_worker_thread_context *init_context = context; |
| 5814 | struct kvm *kvm = init_context->kvm; |
| 5815 | kvm_vm_thread_fn_t thread_fn = init_context->thread_fn; |
| 5816 | uintptr_t data = init_context->data; |
| 5817 | int err; |
| 5818 | |
| 5819 | err = kthread_park(current); |
| 5820 | /* kthread_park(current) is never supposed to return an error */ |
| 5821 | WARN_ON(err != 0); |
| 5822 | if (err) |
| 5823 | goto init_complete; |
| 5824 | |
| 5825 | err = cgroup_attach_task_all(init_context->parent, current); |
| 5826 | if (err) { |
| 5827 | kvm_err("%s: cgroup_attach_task_all failed with err %d\n", |
| 5828 | __func__, err); |
| 5829 | goto init_complete; |
| 5830 | } |
| 5831 | |
| 5832 | set_user_nice(current, task_nice(init_context->parent)); |
| 5833 | |
| 5834 | init_complete: |
| 5835 | init_context->err = err; |
| 5836 | complete(&init_context->init_done); |
| 5837 | init_context = NULL; |
| 5838 | |
| 5839 | if (err) |
| 5840 | return err; |
| 5841 | |
| 5842 | /* Wait to be woken up by the spawner before proceeding. */ |
| 5843 | kthread_parkme(); |
| 5844 | |
| 5845 | if (!kthread_should_stop()) |
| 5846 | err = thread_fn(kvm, data); |
| 5847 | |
| 5848 | return err; |
| 5849 | } |
| 5850 | |
| 5851 | int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, |
| 5852 | uintptr_t data, const char *name, |
| 5853 | struct task_struct **thread_ptr) |
| 5854 | { |
| 5855 | struct kvm_vm_worker_thread_context init_context = {}; |
| 5856 | struct task_struct *thread; |
| 5857 | |
| 5858 | *thread_ptr = NULL; |
| 5859 | init_context.kvm = kvm; |
| 5860 | init_context.parent = current; |
| 5861 | init_context.thread_fn = thread_fn; |
| 5862 | init_context.data = data; |
| 5863 | init_completion(&init_context.init_done); |
| 5864 | |
| 5865 | thread = kthread_run(kvm_vm_worker_thread, &init_context, |
| 5866 | "%s-%d", name, task_pid_nr(current)); |
| 5867 | if (IS_ERR(thread)) |
| 5868 | return PTR_ERR(thread); |
| 5869 | |
| 5870 | /* kthread_run is never supposed to return NULL */ |
| 5871 | WARN_ON(thread == NULL); |
| 5872 | |
| 5873 | wait_for_completion(&init_context.init_done); |
| 5874 | |
| 5875 | if (!init_context.err) |
| 5876 | *thread_ptr = thread; |
| 5877 | |
| 5878 | return init_context.err; |
| 5879 | } |
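The spawned kthread parks itself after init, so the caller decides when the worker actually starts by unparking it, and later tears it down with kthread_stop(). Sketch of a caller in the style of x86's NX huge-page recovery thread (simplified; the loop body is a placeholder):

/* Runs in VM context once the spawner unparks the thread. */
static int demo_recovery_worker(struct kvm *kvm, uintptr_t data)
{
	while (!kthread_should_stop()) {
		/* ... periodic per-VM housekeeping would go here ... */
		schedule_timeout_idle(HZ);
	}
	return 0;
}

static int demo_start_worker(struct kvm *kvm, struct task_struct **out)
{
	int err = kvm_vm_create_worker_thread(kvm, demo_recovery_worker, 0,
					      "kvm-demo-worker", out);

	if (!err && *out)
		kthread_unpark(*out);	/* worker blocks in kthread_parkme() */
	return err;
}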