// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/misc_cgroup.h>
#include <linux/processor.h>
#include <linux/trace_events.h>

#include <asm/pkru.h>
#include <asm/trapnr.h>
#include <asm/fpu/xcr.h>

#include "x86.h"
#include "svm.h"
#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"

#ifndef CONFIG_KVM_AMD_SEV
/*
 * When this config is not defined, the SEV feature is not supported and the
 * APIs in this file are not used, but this file still gets compiled into the
 * KVM AMD module.
 *
 * In that case, enum misc_res_type in linux/misc_cgroup.h does not have the
 * MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries.
 *
 * The macros below allow compilation to succeed.
 */
#define MISC_CG_RES_SEV MISC_CG_RES_TYPES
#define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES
#endif

#ifdef CONFIG_KVM_AMD_SEV
/* enable/disable SEV support */
static bool sev_enabled = true;
module_param_named(sev, sev_enabled, bool, 0444);

/* enable/disable SEV-ES support */
static bool sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);
#else
#define sev_enabled false
#define sev_es_enabled false
#endif /* CONFIG_KVM_AMD_SEV */

static u8 sev_enc_bit;
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long sev_me_mask;
static unsigned int nr_asids;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;

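/*
 * A range of guest memory registered by userspace for encryption: the
 * backing pages are pinned and the region is kept on the per-VM
 * regions_list.
 */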
struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

/* Called with the sev_bitmap_lock held, or on shutdown */
static int sev_flush_asids(int min_asid, int max_asid)
{
	int ret, asid, error = 0;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
	if (asid > max_asid)
		return -EBUSY;

	/*
	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
	 * so it must be guarded.
	 */
	down_write(&sev_deactivate_lock);

	wbinvd_on_all_cpus();
	ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return ret;
}

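/*
 * A "mirror" VM shares the encryption context (and thus the ASID) of a
 * primary SEV guest; enc_context_owner points back at the owning VM.
 */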
static inline bool is_mirroring_enc_context(struct kvm *kvm)
{
	return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(int min_asid, int max_asid)
{
	if (sev_flush_asids(min_asid, max_asid))
		return false;

	/* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
		   nr_asids);
	bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);

	return true;
}

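/*
 * Charge/uncharge one ASID's worth of usage against the misc cgroup
 * controller, using the SEV-ES resource pool for SEV-ES guests and the
 * SEV pool otherwise.
 */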
static int sev_misc_cg_try_charge(struct kvm_sev_info *sev)
{
	enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
	return misc_cg_try_charge(type, sev->misc_cg, 1);
}

static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
{
	enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
	misc_cg_uncharge(type, sev->misc_cg, 1);
}

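/*
 * Allocate a free ASID for the guest, charging the current misc cgroup
 * first.  If the bitmap is exhausted, reclaimable ASIDs are recycled once
 * before giving up.  Returns the new ASID on success, -errno on failure.
 */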
static int sev_asid_new(struct kvm_sev_info *sev)
{
	int asid, min_asid, max_asid, ret;
	bool retry = true;

	WARN_ON(sev->misc_cg);
	sev->misc_cg = get_current_misc_cg();
	ret = sev_misc_cg_try_charge(sev);
	if (ret) {
		put_misc_cg(sev->misc_cg);
		sev->misc_cg = NULL;
		return ret;
	}

	mutex_lock(&sev_bitmap_lock);

	/*
	 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
	 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
	 */
	min_asid = sev->es_active ? 1 : min_sev_asid;
	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
again:
	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
	if (asid > max_asid) {
		if (retry && __sev_recycle_asids(min_asid, max_asid)) {
			retry = false;
			goto again;
		}
		mutex_unlock(&sev_bitmap_lock);
		ret = -EBUSY;
		goto e_uncharge;
	}

	__set_bit(asid, sev_asid_bitmap);

	mutex_unlock(&sev_bitmap_lock);

	return asid;
e_uncharge:
	sev_misc_cg_uncharge(sev);
	put_misc_cg(sev->misc_cg);
	sev->misc_cg = NULL;
	return ret;
}

static int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->asid;
}

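/*
 * Release an ASID: mark it reclaimable (actual reuse requires a DF_FLUSH
 * via __sev_recycle_asids()), drop the cached per-CPU VMCB pointers for it,
 * and uncharge the misc cgroup.
 */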
static void sev_asid_free(struct kvm_sev_info *sev)
{
	struct svm_cpu_data *sd;
	int cpu;

	mutex_lock(&sev_bitmap_lock);

	__set_bit(sev->asid, sev_reclaim_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu(svm_data, cpu);
		sd->sev_vmcbs[sev->asid] = NULL;
	}

	mutex_unlock(&sev_bitmap_lock);

	sev_misc_cg_uncharge(sev);
	put_misc_cg(sev->misc_cg);
	sev->misc_cg = NULL;
}

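/* Tell the PSP firmware to destroy the guest context for @handle. */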
static void sev_decommission(unsigned int handle)
{
	struct sev_data_decommission decommission;

	if (!handle)
		return;

	decommission.handle = handle;
	sev_guest_decommission(&decommission, NULL);
}

static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_deactivate deactivate;

	if (!handle)
		return;

	deactivate.handle = handle;

	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(&deactivate, NULL);
	up_read(&sev_deactivate_lock);

	sev_decommission(handle);
}

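/*
 * KVM_SEV_INIT / KVM_SEV_ES_INIT: activate SEV(-ES) for this VM.  Must be
 * issued before any vCPUs are created; allocates the guest's ASID and
 * initializes the PSP firmware.
 */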
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	int asid, ret;

	if (kvm->created_vcpus)
		return -EINVAL;

	ret = -EBUSY;
	if (unlikely(sev->active))
		return ret;

	sev->active = true;
	sev->es_active = argp->id == KVM_SEV_ES_INIT;
	asid = sev_asid_new(sev);
	if (asid < 0)
		goto e_no_asid;
	sev->asid = asid;

	ret = sev_platform_init(&argp->error);
	if (ret)
		goto e_free;

	INIT_LIST_HEAD(&sev->regions_list);

	return 0;

e_free:
	sev_asid_free(sev);
	sev->asid = 0;
e_no_asid:
	sev->es_active = false;
	sev->active = false;
	return ret;
}

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
	struct sev_data_activate activate;
	int asid = sev_get_asid(kvm);
	int ret;

	/* activate ASID on the given handle */
	activate.handle = handle;
	activate.asid = asid;
	ret = sev_guest_activate(&activate, error);

	return ret;
}

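/*
 * Issue a SEV firmware command on behalf of userspace.  @fd must refer to
 * the userspace-supplied /dev/sev file so that the PSP driver can validate
 * the caller before forwarding the command.
 */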
static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = sev_issue_cmd_external_user(f.file, id, data, error);

	fdput(f);
	return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return __sev_issue_cmd(sev->fd, id, data, error);
}

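/*
 * KVM_SEV_LAUNCH_START: create the guest's memory encryption context in the
 * PSP firmware and bind this VM's ASID to it.  The firmware-assigned handle
 * is copied back to userspace on success.  Roughly, userspace drives this
 * via KVM_MEMORY_ENCRYPT_OP on the VM fd (error handling omitted):
 *
 *	struct kvm_sev_launch_start start = { .policy = policy };
 *	struct kvm_sev_cmd cmd = {
 *		.id = KVM_SEV_LAUNCH_START,
 *		.data = (__u64)(uintptr_t)&start,
 *		.sev_fd = open("/dev/sev", O_RDWR),
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 */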
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_start start;
	struct kvm_sev_launch_start params;
	void *dh_blob, *session_blob;
	int *error = &argp->error;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	memset(&start, 0, sizeof(start));

	dh_blob = NULL;
	if (params.dh_uaddr) {
		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
		if (IS_ERR(dh_blob))
			return PTR_ERR(dh_blob);

		start.dh_cert_address = __sme_set(__pa(dh_blob));
		start.dh_cert_len = params.dh_len;
	}

	session_blob = NULL;
	if (params.session_uaddr) {
		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
		if (IS_ERR(session_blob)) {
			ret = PTR_ERR(session_blob);
			goto e_free_dh;
		}

		start.session_address = __sme_set(__pa(session_blob));
		start.session_len = params.session_len;
	}

	start.handle = params.handle;
	start.policy = params.policy;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start.handle, error);
	if (ret) {
		sev_decommission(start.handle);
		goto e_free_session;
	}

	/* return handle to userspace */
	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
		sev_unbind_asid(kvm, start.handle);
		ret = -EFAULT;
		goto e_free_session;
	}

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_blob);
e_free_dh:
	kfree(dh_blob);
	return ret;
}

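/*
 * Pin a range of user memory with pin_user_pages_fast() and account it
 * against RLIMIT_MEMLOCK (bypassed with CAP_IPC_LOCK).  Returns the pinned
 * page array, or an ERR_PTR() on failure.
 */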
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	unsigned long npages, size;
	int npinned;
	unsigned long locked, lock_limit;
	struct page **pages;
	unsigned long first, last;
	int ret;

	lockdep_assert_held(&kvm->lock);

	if (ulen == 0 || uaddr + ulen < uaddr)
		return ERR_PTR(-EINVAL);

	/* Calculate number of pages. */
	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return ERR_PTR(-ENOMEM);
	}

	if (WARN_ON_ONCE(npages > INT_MAX))
		return ERR_PTR(-EINVAL);

	/* Avoid using vmalloc for smaller buffers. */
	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	else
		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Pin the user virtual address. */
	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);
		ret = -ENOMEM;
		goto err;
	}

	*n = npages;
	sev->pages_locked = locked;

	return pages;

err:
	if (npinned > 0)
		unpin_user_pages(pages, npinned);

	kvfree(pages);
	return ERR_PTR(ret);
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	unpin_user_pages(pages, npages);
	kvfree(pages);
	sev->pages_locked -= npages;
}

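/*
 * Flush the given pages out of the CPU caches with CLFLUSH.  Not needed on
 * parts with the SME_COHERENT feature, where caches stay coherent across
 * encrypted and unencrypted accesses.
 */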
static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
	uint8_t *page_virtual;
	unsigned long i;

	if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
	    pages == NULL)
		return;

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_atomic(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
}

static unsigned long get_num_contig_pages(unsigned long idx,
					  struct page **inpages, unsigned long npages)
{
	unsigned long paddr, next_paddr;
	unsigned long i = idx + 1, pages = 1;

	/* find the number of contiguous pages starting from idx */
	paddr = __sme_page_pa(inpages[idx]);
	while (i < npages) {
		next_paddr = __sme_page_pa(inpages[i++]);
		if ((paddr + PAGE_SIZE) == next_paddr) {
			pages++;
			paddr = next_paddr;
			continue;
		}
		break;
	}

	return pages;
}

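/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: encrypt a range of guest memory in place,
 * batching physically contiguous runs of pinned pages into a single
 * firmware call per run.
 */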
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_launch_update_data params;
	struct sev_data_launch_update_data data;
	struct page **inpages;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	vaddr = params.uaddr;
	size = params.len;
	vaddr_end = vaddr + size;

	/* Lock the user memory. */
	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
	if (IS_ERR(inpages))
		return PTR_ERR(inpages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(inpages, npages);

	data.reserved = 0;
	data.handle = sev->handle;

	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
		int offset, len;

		/*
		 * If the user buffer is not page-aligned, calculate the offset
		 * within the page.
		 */
		offset = vaddr & (PAGE_SIZE - 1);

		/* Calculate the number of pages that can be encrypted in one go. */
		pages = get_num_contig_pages(i, inpages, npages);

		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

		data.len = len;
		data.address = __sme_page_pa(inpages[i]) + offset;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
		if (ret)
			goto e_unpin;

		size -= len;
		next_vaddr = vaddr + len;
	}

e_unpin:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(inpages[i]);
		mark_page_accessed(inpages[i]);
	}
	/* unlock the user pages */
	sev_unpin_memory(kvm, inpages, npages);
	return ret;
}

static int sev_es_sync_vmsa(struct vcpu_svm *svm)
{
	struct vmcb_save_area *save = &svm->vmcb->save;

	/* Check some debug related fields before encrypting the VMSA */
	if (svm->vcpu.guest_debug || (save->dr7 & ~DR7_FIXED_1))
		return -EINVAL;

	/* Sync registers */
	save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
	save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
	save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
	save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
	save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
	save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
	save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
#ifdef CONFIG_X86_64
	save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8];
	save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9];
	save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
	save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
	save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
	save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
	save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
	save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
#endif
	save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];

	/* Sync some non-GPR registers before encrypting */
	save->xcr0 = svm->vcpu.arch.xcr0;
	save->pkru = svm->vcpu.arch.pkru;
	save->xss = svm->vcpu.arch.ia32_xss;
	save->dr6 = svm->vcpu.arch.dr6;

	/*
	 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
	 * the traditional VMSA that is part of the VMCB. Copy the
	 * traditional VMSA as it has been built so far (in prep
	 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
	 */
	memcpy(svm->sev_es.vmsa, save, sizeof(*save));

	return 0;
}

static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
				    int *error)
{
	struct sev_data_launch_update_vmsa vmsa;
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	/* Perform some pre-encryption checks against the VMSA */
	ret = sev_es_sync_vmsa(svm);
	if (ret)
		return ret;

	/*
	 * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
	 * the VMSA memory content (i.e. it will write the same memory region
	 * with the guest's key), so invalidate it first.
	 */
	clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);

	vmsa.reserved = 0;
	vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
	vmsa.address = __sme_pa(svm->sev_es.vmsa);
	vmsa.len = PAGE_SIZE;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
	if (ret)
		return ret;

	vcpu->arch.guest_state_protected = true;
	return 0;
}

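/*
 * KVM_SEV_LAUNCH_UPDATE_VMSA: encrypt each vCPU's VMSA so that the initial
 * register state is measured and protected (SEV-ES only).
 */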
static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;
	int ret;

	if (!sev_es_guest(kvm))
		return -ENOTTY;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		ret = mutex_lock_killable(&vcpu->mutex);
		if (ret)
			return ret;

		ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);

		mutex_unlock(&vcpu->mutex);
		if (ret)
			return ret;
	}

	return 0;
}

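/*
 * KVM_SEV_LAUNCH_MEASURE: retrieve the measurement of the launched guest.
 * If userspace passes a zero length, only the required blob length is
 * returned.
 */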
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *measure = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_measure data;
	struct kvm_sev_launch_measure params;
	void __user *p = NULL;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, measure, sizeof(params)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE)
			return -EINVAL;

		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			return -ENOMEM;

		data.address = __psp_pa(blob);
		data.len = params.len;
	}

cmd:
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);

	/*
	 * If we only queried the blob length, the firmware filled in the
	 * expected length, so skip the error check and report it back.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data.len;
	if (copy_to_user(measure, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
	return ret;
}

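/* KVM_SEV_LAUNCH_FINISH: transition the guest into the running state. */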
static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
}

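/* KVM_SEV_GUEST_STATUS: report the guest's policy, state and handle. */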
static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_guest_status params;
	struct sev_data_guest_status data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	memset(&data, 0, sizeof(data));

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
	if (ret)
		return ret;

	params.policy = data.policy;
	params.state = data.state;
	params.handle = data.handle;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
		ret = -EFAULT;

	return ret;
}

static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
			       unsigned long dst, int size,
			       int *error, bool enc)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_dbg data;

	data.reserved = 0;
	data.handle = sev->handle;
	data.dst_addr = dst;
	data.src_addr = src;
	data.len = size;

	return sev_issue_cmd(kvm,
			     enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
			     &data, error);
}

static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
			     unsigned long dst_paddr, int sz, int *err)
{
	int offset;

	/*
	 * It's safe to read more than we are asked for; the caller must
	 * ensure that the destination has enough space.
	 */
	offset = src_paddr & 15;
	src_paddr = round_down(src_paddr, 16);
	sz = round_up(sz + offset, 16);

	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}

static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
				  void __user *dst_uaddr,
				  unsigned long dst_paddr,
				  int size, int *err)
{
	struct page *tpage = NULL;
	int ret, offset;

	/* if inputs are not 16-byte aligned then use an intermediate buffer */
	if (!IS_ALIGNED(dst_paddr, 16) ||
	    !IS_ALIGNED(paddr, 16) ||
	    !IS_ALIGNED(size, 16)) {
		tpage = (void *)alloc_page(GFP_KERNEL);
		if (!tpage)
			return -ENOMEM;

		dst_paddr = __sme_page_pa(tpage);
	}

	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
	if (ret)
		goto e_free;

	if (tpage) {
		offset = paddr & 15;
		if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
			ret = -EFAULT;
	}

e_free:
	if (tpage)
		__free_page(tpage);

	return ret;
}

static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
				  void __user *vaddr,
				  unsigned long dst_paddr,
				  void __user *dst_vaddr,
				  int size, int *error)
{
	struct page *src_tpage = NULL;
	struct page *dst_tpage = NULL;
	int ret, len = size;

	/* If the source buffer is not aligned then use an intermediate buffer */
	if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
		src_tpage = alloc_page(GFP_KERNEL);
		if (!src_tpage)
			return -ENOMEM;

		if (copy_from_user(page_address(src_tpage), vaddr, size)) {
			__free_page(src_tpage);
			return -EFAULT;
		}

		paddr = __sme_page_pa(src_tpage);
	}

	/*
	 * If the destination buffer or length is not aligned then do a
	 * read-modify-write:
	 * - decrypt the destination into an intermediate buffer
	 * - copy the source buffer into an intermediate buffer
	 * - use the intermediate buffer as the source buffer
	 */
	if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
		int dst_offset;

		dst_tpage = alloc_page(GFP_KERNEL);
		if (!dst_tpage) {
			ret = -ENOMEM;
			goto e_free;
		}

		ret = __sev_dbg_decrypt(kvm, dst_paddr,
					__sme_page_pa(dst_tpage), size, error);
		if (ret)
			goto e_free;

		/*
		 * If the source is a kernel buffer then use memcpy(),
		 * otherwise copy_from_user().
		 */
		dst_offset = dst_paddr & 15;

		if (src_tpage)
			memcpy(page_address(dst_tpage) + dst_offset,
			       page_address(src_tpage), size);
		else {
			if (copy_from_user(page_address(dst_tpage) + dst_offset,
					   vaddr, size)) {
				ret = -EFAULT;
				goto e_free;
			}
		}

		paddr = __sme_page_pa(dst_tpage);
		dst_paddr = round_down(dst_paddr, 16);
		len = round_up(size, 16);
	}

	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
	if (src_tpage)
		__free_page(src_tpage);
	if (dst_tpage)
		__free_page(dst_tpage);
	return ret;
}

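/*
 * KVM_SEV_DBG_DECRYPT / KVM_SEV_DBG_ENCRYPT: debug access to guest memory,
 * walking the range one page at a time and pinning the source and
 * destination pages around each firmware call.
 */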
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
	unsigned long vaddr, vaddr_end, next_vaddr;
	unsigned long dst_vaddr;
	struct page **src_p, **dst_p;
	struct kvm_sev_dbg debug;
	unsigned long n;
	unsigned int size;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
		return -EFAULT;

	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
		return -EINVAL;
	if (!debug.dst_uaddr)
		return -EINVAL;

	vaddr = debug.src_uaddr;
	size = debug.len;
	vaddr_end = vaddr + size;
	dst_vaddr = debug.dst_uaddr;

	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
		int len, s_off, d_off;

		/* lock userspace source and destination page */
		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
		if (IS_ERR(src_p))
			return PTR_ERR(src_p);

		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
		if (IS_ERR(dst_p)) {
			sev_unpin_memory(kvm, src_p, n);
			return PTR_ERR(dst_p);
		}

		/*
		 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
		 * the pages; flush the destination too so that future accesses do not
		 * see stale data.
		 */
		sev_clflush_pages(src_p, 1);
		sev_clflush_pages(dst_p, 1);

		/*
		 * Since user buffer may not be page aligned, calculate the
		 * offset within the page.
		 */
		s_off = vaddr & ~PAGE_MASK;
		d_off = dst_vaddr & ~PAGE_MASK;
		len = min_t(size_t, (PAGE_SIZE - s_off), size);

		if (dec)
			ret = __sev_dbg_decrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     (void __user *)dst_vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     len, &argp->error);
		else
			ret = __sev_dbg_encrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     (void __user *)vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     (void __user *)dst_vaddr,
						     len, &argp->error);

		sev_unpin_memory(kvm, src_p, n);
		sev_unpin_memory(kvm, dst_p, n);

		if (ret)
			goto err;

		next_vaddr = vaddr + len;
		dst_vaddr = dst_vaddr + len;
		size -= len;
	}
err:
	return ret;
}

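/*
 * KVM_SEV_LAUNCH_SECRET: inject a secret into guest memory during launch.
 * The target guest pages must be physically contiguous.
 */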
| 991 | static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 992 | { |
| 993 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 994 | struct sev_data_launch_secret data; |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 995 | struct kvm_sev_launch_secret params; |
| 996 | struct page **pages; |
| 997 | void *blob, *hdr; |
Cfir Cohen | 50085be | 2020-08-07 17:37:46 -0700 | [diff] [blame] | 998 | unsigned long n, i; |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 999 | int ret, offset; |
| 1000 | |
| 1001 | if (!sev_guest(kvm)) |
| 1002 | return -ENOTTY; |
| 1003 | |
| 1004 | if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) |
| 1005 | return -EFAULT; |
| 1006 | |
| 1007 | pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1); |
Paolo Bonzini | a8d908b | 2020-06-23 05:12:24 -0400 | [diff] [blame] | 1008 | if (IS_ERR(pages)) |
| 1009 | return PTR_ERR(pages); |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1010 | |
| 1011 | /* |
Paolo Bonzini | 14e3dd8 | 2020-09-23 13:01:33 -0400 | [diff] [blame] | 1012 | * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in |
| 1013 | * place; the cache may contain the data that was written unencrypted. |
Cfir Cohen | 50085be | 2020-08-07 17:37:46 -0700 | [diff] [blame] | 1014 | */ |
| 1015 | sev_clflush_pages(pages, n); |
| 1016 | |
| 1017 | /* |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1018 | * The secret must be copied into contiguous memory region, lets verify |
| 1019 | * that userspace memory pages are contiguous before we issue command. |
| 1020 | */ |
| 1021 | if (get_num_contig_pages(0, pages, n) != n) { |
| 1022 | ret = -EINVAL; |
| 1023 | goto e_unpin_memory; |
| 1024 | } |
| 1025 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1026 | memset(&data, 0, sizeof(data)); |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1027 | |
| 1028 | offset = params.guest_uaddr & (PAGE_SIZE - 1); |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1029 | data.guest_address = __sme_page_pa(pages[0]) + offset; |
| 1030 | data.guest_len = params.guest_len; |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1031 | |
| 1032 | blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len); |
| 1033 | if (IS_ERR(blob)) { |
| 1034 | ret = PTR_ERR(blob); |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1035 | goto e_unpin_memory; |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1036 | } |
| 1037 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1038 | data.trans_address = __psp_pa(blob); |
| 1039 | data.trans_len = params.trans_len; |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1040 | |
| 1041 | hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); |
| 1042 | if (IS_ERR(hdr)) { |
| 1043 | ret = PTR_ERR(hdr); |
| 1044 | goto e_free_blob; |
| 1045 | } |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1046 | data.hdr_address = __psp_pa(hdr); |
| 1047 | data.hdr_len = params.hdr_len; |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1048 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1049 | data.handle = sev->handle; |
| 1050 | ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error); |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1051 | |
| 1052 | kfree(hdr); |
| 1053 | |
| 1054 | e_free_blob: |
| 1055 | kfree(blob); |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1056 | e_unpin_memory: |
Cfir Cohen | 50085be | 2020-08-07 17:37:46 -0700 | [diff] [blame] | 1057 | /* content of memory is updated, mark pages dirty */ |
| 1058 | for (i = 0; i < n; i++) { |
| 1059 | set_page_dirty_lock(pages[i]); |
| 1060 | mark_page_accessed(pages[i]); |
| 1061 | } |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1062 | sev_unpin_memory(kvm, pages, n); |
| 1063 | return ret; |
| 1064 | } |
| 1065 | |
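/*
 * Illustrative sketch (not part of this file): how userspace might drive the
 * KVM_SEV_LAUNCH_SECRET handler above.  The vm_fd/sev_fd descriptors, helper
 * name and buffer setup are assumptions; the ioctl, command id and structs
 * are the KVM UAPI from <linux/kvm.h>.  Note that the pinned guest pages
 * must be physically contiguous, per the get_num_contig_pages() check above.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int launch_secret(int vm_fd, int sev_fd,
			 void *hdr, uint32_t hdr_len,
			 void *guest_hva, uint32_t guest_len,
			 void *trans, uint32_t trans_len)
{
	struct kvm_sev_launch_secret secret = {
		.hdr_uaddr   = (uint64_t)(uintptr_t)hdr,
		.hdr_len     = hdr_len,
		.guest_uaddr = (uint64_t)(uintptr_t)guest_hva,
		.guest_len   = guest_len,
		.trans_uaddr = (uint64_t)(uintptr_t)trans,
		.trans_len   = trans_len,
	};
	struct kvm_sev_cmd cmd = {
		.id     = KVM_SEV_LAUNCH_SECRET,
		.data   = (uint64_t)(uintptr_t)&secret,
		.sev_fd = sev_fd,
	};

	/* On failure, cmd.error carries the SEV firmware status code. */
	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}
#endif
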
Brijesh Singh | 2c07ded | 2021-01-04 09:17:49 -0600 | [diff] [blame] | 1066 | static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 1067 | { |
| 1068 | void __user *report = (void __user *)(uintptr_t)argp->data; |
| 1069 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1070 | struct sev_data_attestation_report data; |
Brijesh Singh | 2c07ded | 2021-01-04 09:17:49 -0600 | [diff] [blame] | 1071 | struct kvm_sev_attestation_report params; |
| 1072 | void __user *p; |
| 1073 | void *blob = NULL; |
| 1074 | int ret; |
| 1075 | |
| 1076 | if (!sev_guest(kvm)) |
| 1077 | return -ENOTTY; |
| 1078 | |
| 1079 | if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
| 1080 | return -EFAULT; |
| 1081 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1082 | memset(&data, 0, sizeof(data)); |
Brijesh Singh | 2c07ded | 2021-01-04 09:17:49 -0600 | [diff] [blame] | 1083 | |
| 1084 | /* User wants to query the blob length */ |
| 1085 | if (!params.len) |
| 1086 | goto cmd; |
| 1087 | |
| 1088 | p = (void __user *)(uintptr_t)params.uaddr; |
| 1089 | if (p) { |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1090 | if (params.len > SEV_FW_BLOB_MAX_SIZE) |
| 1091 | return -EINVAL; |
Brijesh Singh | 2c07ded | 2021-01-04 09:17:49 -0600 | [diff] [blame] | 1092 | |
Sean Christopherson | eba04b2 | 2021-03-30 19:30:25 -0700 | [diff] [blame] | 1093 | blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT); |
Brijesh Singh | 2c07ded | 2021-01-04 09:17:49 -0600 | [diff] [blame] | 1094 | if (!blob) |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1095 | return -ENOMEM; |
Brijesh Singh | 2c07ded | 2021-01-04 09:17:49 -0600 | [diff] [blame] | 1096 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1097 | data.address = __psp_pa(blob); |
| 1098 | data.len = params.len; |
| 1099 | memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce)); |
Brijesh Singh | 2c07ded | 2021-01-04 09:17:49 -0600 | [diff] [blame] | 1100 | } |
| 1101 | cmd: |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1102 | data.handle = sev->handle; |
| 1103 | ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error); |
Brijesh Singh | 2c07ded | 2021-01-04 09:17:49 -0600 | [diff] [blame] | 1104 | /* |
| 1105 | * If we only queried the report blob length, the FW responded with the expected data.
| 1106 | */ |
| 1107 | if (!params.len) |
| 1108 | goto done; |
| 1109 | |
| 1110 | if (ret) |
| 1111 | goto e_free_blob; |
| 1112 | |
| 1113 | if (blob) { |
| 1114 | if (copy_to_user(p, blob, params.len)) |
| 1115 | ret = -EFAULT; |
| 1116 | } |
| 1117 | |
| 1118 | done: |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1119 | params.len = data.len; |
Brijesh Singh | 2c07ded | 2021-01-04 09:17:49 -0600 | [diff] [blame] | 1120 | if (copy_to_user(report, &params, sizeof(params)))
| 1121 | ret = -EFAULT; |
| 1122 | e_free_blob: |
| 1123 | kfree(blob); |
Brijesh Singh | 2c07ded | 2021-01-04 09:17:49 -0600 | [diff] [blame] | 1124 | return ret; |
| 1125 | } |
| 1126 | |
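/*
 * Illustrative sketch (not part of this file) of the two-call pattern the
 * handler above implements: a first call with len == 0 makes the firmware
 * report the required blob size (the command itself is expected to "fail"
 * with an INVALID_LEN error), then the call is repeated with a buffer.
 * The helper name and nonce handling are assumptions; includes are as in
 * the first sketch, plus <stdlib.h> and <string.h>.
 */
#if 0
static int get_attestation_report(int vm_fd, int sev_fd, const uint8_t nonce[16])
{
	struct kvm_sev_attestation_report report = {};
	struct kvm_sev_cmd cmd = {
		.id     = KVM_SEV_GET_ATTESTATION_REPORT,
		.data   = (uint64_t)(uintptr_t)&report,
		.sev_fd = sev_fd,
	};

	memcpy(report.mnonce, nonce, sizeof(report.mnonce));

	/* First call: report.len == 0, so the firmware fills in the length. */
	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);

	report.uaddr = (uint64_t)(uintptr_t)malloc(report.len);
	if (!report.uaddr)
		return -1;

	/* Second call: fetch report.len bytes into the buffer. */
	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}
#endif
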
Brijesh Singh | 4cfdd47 | 2021-04-15 15:53:14 +0000 | [diff] [blame] | 1127 | /* Userspace wants to query session length. */ |
| 1128 | static int |
| 1129 | __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp, |
| 1130 | struct kvm_sev_send_start *params) |
| 1131 | { |
| 1132 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1133 | struct sev_data_send_start data; |
Brijesh Singh | 4cfdd47 | 2021-04-15 15:53:14 +0000 | [diff] [blame] | 1134 | int ret; |
| 1135 | |
Ashish Kalra | 4f13d47 | 2021-06-07 06:15:32 +0000 | [diff] [blame] | 1136 | memset(&data, 0, sizeof(data)); |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1137 | data.handle = sev->handle; |
| 1138 | ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error); |
Brijesh Singh | 4cfdd47 | 2021-04-15 15:53:14 +0000 | [diff] [blame] | 1139 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1140 | params->session_len = data.session_len; |
Brijesh Singh | 4cfdd47 | 2021-04-15 15:53:14 +0000 | [diff] [blame] | 1141 | if (copy_to_user((void __user *)(uintptr_t)argp->data, params, |
| 1142 | sizeof(struct kvm_sev_send_start))) |
| 1143 | ret = -EFAULT; |
| 1144 | |
Brijesh Singh | 4cfdd47 | 2021-04-15 15:53:14 +0000 | [diff] [blame] | 1145 | return ret; |
| 1146 | } |
| 1147 | |
| 1148 | static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 1149 | { |
| 1150 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1151 | struct sev_data_send_start data; |
Brijesh Singh | 4cfdd47 | 2021-04-15 15:53:14 +0000 | [diff] [blame] | 1152 | struct kvm_sev_send_start params; |
| 1153 | void *amd_certs, *session_data; |
| 1154 | void *pdh_cert, *plat_certs; |
| 1155 | int ret; |
| 1156 | |
| 1157 | if (!sev_guest(kvm)) |
| 1158 | return -ENOTTY; |
| 1159 | |
| 1160 | if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
| 1161 | sizeof(struct kvm_sev_send_start))) |
| 1162 | return -EFAULT; |
| 1163 | |
| 1164 | /* if session_len is zero, userspace wants to query the session length */ |
| 1165 | if (!params.session_len) |
| 1166 | return __sev_send_start_query_session_length(kvm, argp, |
| 1167 | &params);
| 1168 | |
| 1169 | /* some sanity checks */ |
| 1170 | if (!params.pdh_cert_uaddr || !params.pdh_cert_len || |
| 1171 | !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE) |
| 1172 | return -EINVAL; |
| 1173 | |
| 1174 | /* allocate the memory to hold the session data blob */ |
| 1175 | session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT); |
| 1176 | if (!session_data) |
| 1177 | return -ENOMEM; |
| 1178 | |
| 1179 | /* copy the certificate blobs from userspace */ |
| 1180 | pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr, |
| 1181 | params.pdh_cert_len); |
| 1182 | if (IS_ERR(pdh_cert)) { |
| 1183 | ret = PTR_ERR(pdh_cert); |
| 1184 | goto e_free_session; |
| 1185 | } |
| 1186 | |
| 1187 | plat_certs = psp_copy_user_blob(params.plat_certs_uaddr, |
| 1188 | params.plat_certs_len); |
| 1189 | if (IS_ERR(plat_certs)) { |
| 1190 | ret = PTR_ERR(plat_certs); |
| 1191 | goto e_free_pdh; |
| 1192 | } |
| 1193 | |
| 1194 | amd_certs = psp_copy_user_blob(params.amd_certs_uaddr, |
| 1195 | params.amd_certs_len); |
| 1196 | if (IS_ERR(amd_certs)) { |
| 1197 | ret = PTR_ERR(amd_certs); |
| 1198 | goto e_free_plat_cert; |
| 1199 | } |
| 1200 | |
Brijesh Singh | 4cfdd47 | 2021-04-15 15:53:14 +0000 | [diff] [blame] | 1201 | /* populate the FW SEND_START fields with system physical addresses */
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1202 | memset(&data, 0, sizeof(data)); |
| 1203 | data.pdh_cert_address = __psp_pa(pdh_cert); |
| 1204 | data.pdh_cert_len = params.pdh_cert_len; |
| 1205 | data.plat_certs_address = __psp_pa(plat_certs); |
| 1206 | data.plat_certs_len = params.plat_certs_len; |
| 1207 | data.amd_certs_address = __psp_pa(amd_certs); |
| 1208 | data.amd_certs_len = params.amd_certs_len; |
| 1209 | data.session_address = __psp_pa(session_data); |
| 1210 | data.session_len = params.session_len; |
| 1211 | data.handle = sev->handle; |
Brijesh Singh | 4cfdd47 | 2021-04-15 15:53:14 +0000 | [diff] [blame] | 1212 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1213 | ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error); |
Brijesh Singh | 4cfdd47 | 2021-04-15 15:53:14 +0000 | [diff] [blame] | 1214 | |
| 1215 | if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr, |
| 1216 | session_data, params.session_len)) { |
| 1217 | ret = -EFAULT; |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1218 | goto e_free_amd_cert; |
Brijesh Singh | 4cfdd47 | 2021-04-15 15:53:14 +0000 | [diff] [blame] | 1219 | } |
| 1220 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1221 | params.policy = data.policy; |
| 1222 | params.session_len = data.session_len; |
Brijesh Singh | 4cfdd47 | 2021-04-15 15:53:14 +0000 | [diff] [blame] | 1223 | if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
| 1224 | sizeof(struct kvm_sev_send_start))) |
| 1225 | ret = -EFAULT; |
| 1226 | |
Brijesh Singh | 4cfdd47 | 2021-04-15 15:53:14 +0000 | [diff] [blame] | 1227 | e_free_amd_cert: |
| 1228 | kfree(amd_certs); |
| 1229 | e_free_plat_cert: |
| 1230 | kfree(plat_certs); |
| 1231 | e_free_pdh: |
| 1232 | kfree(pdh_cert); |
| 1233 | e_free_session: |
| 1234 | kfree(session_data); |
| 1235 | return ret; |
| 1236 | } |
| 1237 | |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1238 | /* Userspace wants to query either header or trans length. */ |
| 1239 | static int |
| 1240 | __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, |
| 1241 | struct kvm_sev_send_update_data *params) |
| 1242 | { |
| 1243 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1244 | struct sev_data_send_update_data data; |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1245 | int ret; |
| 1246 | |
Ashish Kalra | 4f13d47 | 2021-06-07 06:15:32 +0000 | [diff] [blame] | 1247 | memset(&data, 0, sizeof(data)); |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1248 | data.handle = sev->handle; |
| 1249 | ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error); |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1250 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1251 | params->hdr_len = data.hdr_len; |
| 1252 | params->trans_len = data.trans_len; |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1253 | |
| 1254 | if (copy_to_user((void __user *)(uintptr_t)argp->data, params, |
| 1255 | sizeof(struct kvm_sev_send_update_data))) |
| 1256 | ret = -EFAULT; |
| 1257 | |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1258 | return ret; |
| 1259 | } |
| 1260 | |
| 1261 | static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 1262 | { |
| 1263 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1264 | struct sev_data_send_update_data data; |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1265 | struct kvm_sev_send_update_data params; |
| 1266 | void *hdr, *trans_data; |
| 1267 | struct page **guest_page; |
| 1268 | unsigned long n; |
| 1269 | int ret, offset; |
| 1270 | |
| 1271 | if (!sev_guest(kvm)) |
| 1272 | return -ENOTTY; |
| 1273 | |
| 1274 | if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
| 1275 | sizeof(struct kvm_sev_send_update_data))) |
| 1276 | return -EFAULT; |
| 1277 | |
| 1278 | /* userspace wants to query either header or trans length */ |
| 1279 | if (!params.trans_len || !params.hdr_len) |
| 1280 | return __sev_send_update_data_query_lengths(kvm, argp, &params);
| 1281 | |
| 1282 | if (!params.trans_uaddr || !params.guest_uaddr || |
| 1283 | !params.guest_len || !params.hdr_uaddr) |
| 1284 | return -EINVAL; |
| 1285 | |
| 1286 | /* Check if we are crossing a page boundary */
| 1287 | offset = params.guest_uaddr & (PAGE_SIZE - 1); |
| 1288 | if (params.guest_len + offset > PAGE_SIZE)
| 1289 | return -EINVAL; |
| 1290 | |
| 1291 | /* Pin guest memory */ |
| 1292 | guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, |
| 1293 | PAGE_SIZE, &n, 0); |
Sean Christopherson | c7a1b2b | 2021-05-06 10:58:26 -0700 | [diff] [blame] | 1294 | if (IS_ERR(guest_page)) |
| 1295 | return PTR_ERR(guest_page); |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1296 | |
| 1297 | /* allocate memory for header and transport buffer */ |
| 1298 | ret = -ENOMEM; |
| 1299 | hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); |
| 1300 | if (!hdr) |
| 1301 | goto e_unpin; |
| 1302 | |
| 1303 | trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT); |
| 1304 | if (!trans_data) |
| 1305 | goto e_free_hdr; |
| 1306 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1307 | memset(&data, 0, sizeof(data)); |
| 1308 | data.hdr_address = __psp_pa(hdr); |
| 1309 | data.hdr_len = params.hdr_len; |
| 1310 | data.trans_address = __psp_pa(trans_data); |
| 1311 | data.trans_len = params.trans_len; |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1312 | |
| 1313 | /* The SEND_UPDATE_DATA command requires the C-bit to always be set. */
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1314 | data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset; |
| 1315 | data.guest_address |= sev_me_mask; |
| 1316 | data.guest_len = params.guest_len; |
| 1317 | data.handle = sev->handle; |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1318 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1319 | ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error); |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1320 | |
| 1321 | if (ret) |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1322 | goto e_free_trans_data; |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1323 | |
| 1324 | /* copy transport buffer to user space */ |
| 1325 | if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, |
| 1326 | trans_data, params.trans_len)) { |
| 1327 | ret = -EFAULT; |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1328 | goto e_free_trans_data; |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1329 | } |
| 1330 | |
| 1331 | /* Copy packet header to userspace. */ |
Sean Christopherson | b4a6939 | 2021-05-06 10:58:25 -0700 | [diff] [blame] | 1332 | if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, |
| 1333 | params.hdr_len)) |
| 1334 | ret = -EFAULT; |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1335 | |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1336 | e_free_trans_data: |
| 1337 | kfree(trans_data); |
| 1338 | e_free_hdr: |
| 1339 | kfree(hdr); |
| 1340 | e_unpin: |
| 1341 | sev_unpin_memory(kvm, guest_page, n); |
| 1342 | |
| 1343 | return ret; |
| 1344 | } |
| 1345 | |
Brijesh Singh | fddecf6 | 2021-04-15 15:54:15 +0000 | [diff] [blame] | 1346 | static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 1347 | { |
| 1348 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1349 | struct sev_data_send_finish data; |
Brijesh Singh | fddecf6 | 2021-04-15 15:54:15 +0000 | [diff] [blame] | 1350 | |
| 1351 | if (!sev_guest(kvm)) |
| 1352 | return -ENOTTY; |
| 1353 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1354 | data.handle = sev->handle; |
| 1355 | return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error); |
Brijesh Singh | fddecf6 | 2021-04-15 15:54:15 +0000 | [diff] [blame] | 1356 | } |
| 1357 | |
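/*
 * Illustrative sketch (not part of this file): the send side of SEV live
 * migration as seen from userspace, tying the handlers above together.
 * SEND_START (issued with session_len == 0 first, to size the session blob)
 * opens the migration session, SEND_UPDATE_DATA re-encrypts one guest page
 * at a time into a transport buffer, and SEND_FINISH closes the session.
 * The helper below covers the per-page step; hdr_len/trans_len would first
 * be discovered by calling with both set to 0, as in the query helper above.
 * All names are assumptions; structs and command ids are the KVM UAPI.
 */
#if 0
static int send_one_page(int vm_fd, int sev_fd, void *page_hva,
			 void *hdr, uint32_t hdr_len,
			 void *trans, uint32_t trans_len)
{
	struct kvm_sev_send_update_data update = {
		.hdr_uaddr   = (uint64_t)(uintptr_t)hdr,
		.hdr_len     = hdr_len,
		.guest_uaddr = (uint64_t)(uintptr_t)page_hva, /* page-aligned hva */
		.guest_len   = 4096,	/* assuming 4KiB pages */
		.trans_uaddr = (uint64_t)(uintptr_t)trans,
		.trans_len   = trans_len,
	};
	struct kvm_sev_cmd cmd = {
		.id     = KVM_SEV_SEND_UPDATE_DATA,
		.data   = (uint64_t)(uintptr_t)&update,
		.sev_fd = sev_fd,
	};

	/* On success, the hdr and trans buffers are shipped to the destination. */
	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}
#endif
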
Steve Rutherford | 5569e2e | 2021-04-20 05:01:20 -0400 | [diff] [blame] | 1358 | static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 1359 | { |
| 1360 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1361 | struct sev_data_send_cancel data; |
Steve Rutherford | 5569e2e | 2021-04-20 05:01:20 -0400 | [diff] [blame] | 1362 | |
| 1363 | if (!sev_guest(kvm)) |
| 1364 | return -ENOTTY; |
| 1365 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1366 | data.handle = sev->handle; |
| 1367 | return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error); |
Steve Rutherford | 5569e2e | 2021-04-20 05:01:20 -0400 | [diff] [blame] | 1368 | } |
| 1369 | |
Brijesh Singh | af43cbb | 2021-04-15 15:54:50 +0000 | [diff] [blame] | 1370 | static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 1371 | { |
| 1372 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1373 | struct sev_data_receive_start start; |
Brijesh Singh | af43cbb | 2021-04-15 15:54:50 +0000 | [diff] [blame] | 1374 | struct kvm_sev_receive_start params; |
| 1375 | int *error = &argp->error; |
| 1376 | void *session_data; |
| 1377 | void *pdh_data; |
| 1378 | int ret; |
| 1379 | |
| 1380 | if (!sev_guest(kvm)) |
| 1381 | return -ENOTTY; |
| 1382 | |
| 1383 | /* Get the parameters from userspace */
| 1384 | if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
| 1385 | sizeof(struct kvm_sev_receive_start))) |
| 1386 | return -EFAULT; |
| 1387 | |
| 1388 | /* some sanity checks */ |
| 1389 | if (!params.pdh_uaddr || !params.pdh_len || |
| 1390 | !params.session_uaddr || !params.session_len) |
| 1391 | return -EINVAL; |
| 1392 | |
| 1393 | pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len); |
| 1394 | if (IS_ERR(pdh_data)) |
| 1395 | return PTR_ERR(pdh_data); |
| 1396 | |
| 1397 | session_data = psp_copy_user_blob(params.session_uaddr, |
| 1398 | params.session_len); |
| 1399 | if (IS_ERR(session_data)) { |
| 1400 | ret = PTR_ERR(session_data); |
| 1401 | goto e_free_pdh; |
| 1402 | } |
| 1403 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1404 | memset(&start, 0, sizeof(start)); |
| 1405 | start.handle = params.handle; |
| 1406 | start.policy = params.policy; |
| 1407 | start.pdh_cert_address = __psp_pa(pdh_data); |
| 1408 | start.pdh_cert_len = params.pdh_len; |
| 1409 | start.session_address = __psp_pa(session_data); |
| 1410 | start.session_len = params.session_len; |
Brijesh Singh | af43cbb | 2021-04-15 15:54:50 +0000 | [diff] [blame] | 1411 | |
| 1412 | /* create memory encryption context */ |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1413 | ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start, |
Brijesh Singh | af43cbb | 2021-04-15 15:54:50 +0000 | [diff] [blame] | 1414 | error); |
| 1415 | if (ret) |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1416 | goto e_free_session; |
Brijesh Singh | af43cbb | 2021-04-15 15:54:50 +0000 | [diff] [blame] | 1417 | |
| 1418 | /* Bind ASID to this guest */ |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1419 | ret = sev_bind_asid(kvm, start.handle, error); |
Mingwei Zhang | f1815e0a | 2021-09-12 18:18:15 +0000 | [diff] [blame] | 1420 | if (ret) { |
| 1421 | sev_decommission(start.handle); |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1422 | goto e_free_session; |
Mingwei Zhang | f1815e0a | 2021-09-12 18:18:15 +0000 | [diff] [blame] | 1423 | } |
Brijesh Singh | af43cbb | 2021-04-15 15:54:50 +0000 | [diff] [blame] | 1424 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1425 | params.handle = start.handle; |
Brijesh Singh | af43cbb | 2021-04-15 15:54:50 +0000 | [diff] [blame] | 1426 | if (copy_to_user((void __user *)(uintptr_t)argp->data, |
| 1427 | &params, sizeof(struct kvm_sev_receive_start))) {
| 1428 | ret = -EFAULT; |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1429 | sev_unbind_asid(kvm, start.handle); |
| 1430 | goto e_free_session; |
Brijesh Singh | af43cbb | 2021-04-15 15:54:50 +0000 | [diff] [blame] | 1431 | } |
| 1432 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1433 | sev->handle = start.handle; |
Brijesh Singh | af43cbb | 2021-04-15 15:54:50 +0000 | [diff] [blame] | 1434 | sev->fd = argp->sev_fd; |
| 1435 | |
Brijesh Singh | af43cbb | 2021-04-15 15:54:50 +0000 | [diff] [blame] | 1436 | e_free_session: |
| 1437 | kfree(session_data); |
| 1438 | e_free_pdh: |
| 1439 | kfree(pdh_data); |
| 1440 | |
| 1441 | return ret; |
| 1442 | } |
| 1443 | |
Brijesh Singh | 15fb7de | 2021-04-15 15:55:17 +0000 | [diff] [blame] | 1444 | static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 1445 | { |
| 1446 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
| 1447 | struct kvm_sev_receive_update_data params; |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1448 | struct sev_data_receive_update_data data; |
Brijesh Singh | 15fb7de | 2021-04-15 15:55:17 +0000 | [diff] [blame] | 1449 | void *hdr = NULL, *trans = NULL; |
| 1450 | struct page **guest_page; |
| 1451 | unsigned long n; |
| 1452 | int ret, offset; |
| 1453 | |
| 1454 | if (!sev_guest(kvm)) |
| 1455 | return -EINVAL; |
| 1456 | |
| 1457 | if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
| 1458 | sizeof(struct kvm_sev_receive_update_data))) |
| 1459 | return -EFAULT; |
| 1460 | |
| 1461 | if (!params.hdr_uaddr || !params.hdr_len || |
| 1462 | !params.guest_uaddr || !params.guest_len || |
| 1463 | !params.trans_uaddr || !params.trans_len) |
| 1464 | return -EINVAL; |
| 1465 | |
| 1466 | /* Check if we are crossing a page boundary */
| 1467 | offset = params.guest_uaddr & (PAGE_SIZE - 1); |
| 1468 | if (params.guest_len + offset > PAGE_SIZE)
| 1469 | return -EINVAL; |
| 1470 | |
| 1471 | hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); |
| 1472 | if (IS_ERR(hdr)) |
| 1473 | return PTR_ERR(hdr); |
| 1474 | |
| 1475 | trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len); |
| 1476 | if (IS_ERR(trans)) { |
| 1477 | ret = PTR_ERR(trans); |
| 1478 | goto e_free_hdr; |
| 1479 | } |
| 1480 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1481 | memset(&data, 0, sizeof(data)); |
| 1482 | data.hdr_address = __psp_pa(hdr); |
| 1483 | data.hdr_len = params.hdr_len; |
| 1484 | data.trans_address = __psp_pa(trans); |
| 1485 | data.trans_len = params.trans_len; |
Brijesh Singh | 15fb7de | 2021-04-15 15:55:17 +0000 | [diff] [blame] | 1486 | |
| 1487 | /* Pin guest memory */ |
Brijesh Singh | 15fb7de | 2021-04-15 15:55:17 +0000 | [diff] [blame] | 1488 | guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, |
Sean Christopherson | 50c0380 | 2021-09-14 14:09:50 -0700 | [diff] [blame] | 1489 | PAGE_SIZE, &n, 1); |
Sean Christopherson | c7a1b2b | 2021-05-06 10:58:26 -0700 | [diff] [blame] | 1490 | if (IS_ERR(guest_page)) { |
| 1491 | ret = PTR_ERR(guest_page); |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1492 | goto e_free_trans; |
Sean Christopherson | c7a1b2b | 2021-05-06 10:58:26 -0700 | [diff] [blame] | 1493 | } |
Brijesh Singh | 15fb7de | 2021-04-15 15:55:17 +0000 | [diff] [blame] | 1494 | |
Masahiro Kozuka | c8c340a | 2021-09-14 14:09:51 -0700 | [diff] [blame] | 1495 | /* |
| 1496 | * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP |
| 1497 | * encrypts the written data with the guest's key, and the cache may |
| 1498 | * contain dirty, unencrypted data. |
| 1499 | */ |
| 1500 | sev_clflush_pages(guest_page, n); |
| 1501 | |
Brijesh Singh | 15fb7de | 2021-04-15 15:55:17 +0000 | [diff] [blame] | 1502 | /* The RECEIVE_UPDATE_DATA command requires the C-bit to always be set. */
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1503 | data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset; |
| 1504 | data.guest_address |= sev_me_mask; |
| 1505 | data.guest_len = params.guest_len; |
| 1506 | data.handle = sev->handle; |
Brijesh Singh | 15fb7de | 2021-04-15 15:55:17 +0000 | [diff] [blame] | 1507 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1508 | ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data, |
Brijesh Singh | 15fb7de | 2021-04-15 15:55:17 +0000 | [diff] [blame] | 1509 | &argp->error); |
| 1510 | |
| 1511 | sev_unpin_memory(kvm, guest_page, n); |
| 1512 | |
Brijesh Singh | 15fb7de | 2021-04-15 15:55:17 +0000 | [diff] [blame] | 1513 | e_free_trans: |
| 1514 | kfree(trans); |
| 1515 | e_free_hdr: |
| 1516 | kfree(hdr); |
| 1517 | |
| 1518 | return ret; |
| 1519 | } |
| 1520 | |
Brijesh Singh | 6a443de | 2021-04-15 15:55:40 +0000 | [diff] [blame] | 1521 | static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 1522 | { |
| 1523 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1524 | struct sev_data_receive_finish data; |
Brijesh Singh | 6a443de | 2021-04-15 15:55:40 +0000 | [diff] [blame] | 1525 | |
| 1526 | if (!sev_guest(kvm)) |
| 1527 | return -ENOTTY; |
| 1528 | |
Sean Christopherson | 238eca8 | 2021-04-06 15:49:52 -0700 | [diff] [blame] | 1529 | data.handle = sev->handle; |
| 1530 | return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error); |
Brijesh Singh | 6a443de | 2021-04-15 15:55:40 +0000 | [diff] [blame] | 1531 | } |
| 1532 | |
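/*
 * Illustrative sketch (not part of this file): the matching receive side.
 * RECEIVE_START recreates the migration session from the source's handle,
 * policy, PDH and session blob; it is followed by RECEIVE_UPDATE_DATA per
 * page (mirroring send_one_page() above) and RECEIVE_FINISH.  The helper
 * name and parameters are assumptions; structs and ids are the KVM UAPI.
 */
#if 0
static int receive_start(int vm_fd, int sev_fd, uint32_t src_handle,
			 uint32_t src_policy, void *pdh, uint32_t pdh_len,
			 void *session, uint32_t session_len)
{
	struct kvm_sev_receive_start rx = {
		.handle        = src_handle,	/* from the source's SEND_START */
		.policy        = src_policy,
		.pdh_uaddr     = (uint64_t)(uintptr_t)pdh,
		.pdh_len       = pdh_len,
		.session_uaddr = (uint64_t)(uintptr_t)session,
		.session_len   = session_len,
	};
	struct kvm_sev_cmd cmd = {
		.id     = KVM_SEV_RECEIVE_START,
		.data   = (uint64_t)(uintptr_t)&rx,
		.sev_fd = sev_fd,
	};

	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}
#endif
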
Sean Christopherson | 8e38e96 | 2021-11-09 21:51:01 +0000 | [diff] [blame] | 1533 | static bool is_cmd_allowed_from_mirror(u32 cmd_id) |
Peter Gonda | 5b92b6c | 2021-09-21 08:03:45 -0700 | [diff] [blame] | 1534 | { |
| 1535 | /* |
| 1536 | * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
| 1537 | * on active mirror VMs. Also allow the debugging and status commands.
| 1538 | */ |
| 1539 | if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA || |
| 1540 | cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT || |
| 1541 | cmd_id == KVM_SEV_DBG_ENCRYPT) |
| 1542 | return true; |
| 1543 | |
| 1544 | return false; |
| 1545 | } |
| 1546 | |
Paolo Bonzini | 501b580 | 2021-11-22 19:50:29 -0500 | [diff] [blame] | 1547 | static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm) |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1548 | { |
Paolo Bonzini | 501b580 | 2021-11-22 19:50:29 -0500 | [diff] [blame] | 1549 | struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info; |
| 1550 | struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info; |
Paolo Bonzini | c9d61dc | 2021-11-22 19:50:36 -0500 | [diff] [blame] | 1551 | int r = -EBUSY; |
Paolo Bonzini | 501b580 | 2021-11-22 19:50:29 -0500 | [diff] [blame] | 1552 | |
| 1553 | if (dst_kvm == src_kvm) |
| 1554 | return -EINVAL; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1555 | |
| 1556 | /* |
Paolo Bonzini | 501b580 | 2021-11-22 19:50:29 -0500 | [diff] [blame] | 1557 | * Bail if these VMs are already involved in a migration to avoid |
| 1558 | * deadlock between two VMs trying to migrate to/from each other. |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1559 | */ |
Paolo Bonzini | 501b580 | 2021-11-22 19:50:29 -0500 | [diff] [blame] | 1560 | if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1)) |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1561 | return -EBUSY; |
| 1562 | |
Paolo Bonzini | c9d61dc | 2021-11-22 19:50:36 -0500 | [diff] [blame] | 1563 | if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1)) |
| 1564 | goto release_dst; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1565 | |
Paolo Bonzini | c9d61dc | 2021-11-22 19:50:36 -0500 | [diff] [blame] | 1566 | r = -EINTR; |
| 1567 | if (mutex_lock_killable(&dst_kvm->lock)) |
| 1568 | goto release_src; |
Wanpeng Li | 597cb79 | 2022-01-04 22:41:03 -0800 | [diff] [blame] | 1569 | if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING)) |
Paolo Bonzini | c9d61dc | 2021-11-22 19:50:36 -0500 | [diff] [blame] | 1570 | goto unlock_dst; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1571 | return 0; |
Paolo Bonzini | c9d61dc | 2021-11-22 19:50:36 -0500 | [diff] [blame] | 1572 | |
| 1573 | unlock_dst: |
| 1574 | mutex_unlock(&dst_kvm->lock); |
| 1575 | release_src: |
| 1576 | atomic_set_release(&src_sev->migration_in_progress, 0); |
| 1577 | release_dst: |
| 1578 | atomic_set_release(&dst_sev->migration_in_progress, 0); |
| 1579 | return r; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1580 | } |
| 1581 | |
Paolo Bonzini | 501b580 | 2021-11-22 19:50:29 -0500 | [diff] [blame] | 1582 | static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm) |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1583 | { |
Paolo Bonzini | 501b580 | 2021-11-22 19:50:29 -0500 | [diff] [blame] | 1584 | struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info; |
| 1585 | struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1586 | |
Paolo Bonzini | 501b580 | 2021-11-22 19:50:29 -0500 | [diff] [blame] | 1587 | mutex_unlock(&dst_kvm->lock); |
| 1588 | mutex_unlock(&src_kvm->lock); |
| 1589 | atomic_set_release(&dst_sev->migration_in_progress, 0); |
| 1590 | atomic_set_release(&src_sev->migration_in_progress, 0); |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1591 | } |
| 1592 | |
| 1593 | |
| 1594 | static int sev_lock_vcpus_for_migration(struct kvm *kvm) |
| 1595 | { |
| 1596 | struct kvm_vcpu *vcpu; |
Marc Zyngier | 46808a4 | 2021-11-16 16:04:02 +0000 | [diff] [blame] | 1597 | unsigned long i, j; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1598 | |
| 1599 | kvm_for_each_vcpu(i, vcpu, kvm) { |
| 1600 | if (mutex_lock_killable(&vcpu->mutex)) |
| 1601 | goto out_unlock; |
| 1602 | } |
| 1603 | |
| 1604 | return 0; |
| 1605 | |
| 1606 | out_unlock: |
| 1607 | kvm_for_each_vcpu(j, vcpu, kvm) { |
| 1608 | if (i == j) |
| 1609 | break; |
| 1610 | |
| 1611 | mutex_unlock(&vcpu->mutex); |
| 1612 | } |
| 1613 | return -EINTR; |
| 1614 | } |
| 1615 | |
| 1616 | static void sev_unlock_vcpus_for_migration(struct kvm *kvm) |
| 1617 | { |
| 1618 | struct kvm_vcpu *vcpu; |
Marc Zyngier | 46808a4 | 2021-11-16 16:04:02 +0000 | [diff] [blame] | 1619 | unsigned long i; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1620 | |
| 1621 | kvm_for_each_vcpu(i, vcpu, kvm) { |
| 1622 | mutex_unlock(&vcpu->mutex); |
| 1623 | } |
| 1624 | } |
| 1625 | |
| 1626 | static void sev_migrate_from(struct kvm_sev_info *dst, |
| 1627 | struct kvm_sev_info *src) |
| 1628 | { |
| 1629 | dst->active = true; |
| 1630 | dst->asid = src->asid; |
| 1631 | dst->handle = src->handle; |
| 1632 | dst->pages_locked = src->pages_locked; |
Paolo Bonzini | 642525e | 2021-11-22 19:50:31 -0500 | [diff] [blame] | 1633 | dst->enc_context_owner = src->enc_context_owner; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1634 | |
| 1635 | src->asid = 0; |
| 1636 | src->active = false; |
| 1637 | src->handle = 0; |
| 1638 | src->pages_locked = 0; |
Paolo Bonzini | 642525e | 2021-11-22 19:50:31 -0500 | [diff] [blame] | 1639 | src->enc_context_owner = NULL; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1640 | |
Paolo Bonzini | 4674164 | 2021-11-22 19:50:28 -0500 | [diff] [blame] | 1641 | list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list); |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1642 | } |
| 1643 | |
Peter Gonda | 0b020f5 | 2021-10-21 10:43:01 -0700 | [diff] [blame] | 1644 | static int sev_es_migrate_from(struct kvm *dst, struct kvm *src) |
| 1645 | { |
Marc Zyngier | 46808a4 | 2021-11-16 16:04:02 +0000 | [diff] [blame] | 1646 | unsigned long i; |
Peter Gonda | 0b020f5 | 2021-10-21 10:43:01 -0700 | [diff] [blame] | 1647 | struct kvm_vcpu *dst_vcpu, *src_vcpu; |
| 1648 | struct vcpu_svm *dst_svm, *src_svm; |
| 1649 | |
| 1650 | if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus)) |
| 1651 | return -EINVAL; |
| 1652 | |
| 1653 | kvm_for_each_vcpu(i, src_vcpu, src) { |
| 1654 | if (!src_vcpu->arch.guest_state_protected) |
| 1655 | return -EINVAL; |
| 1656 | } |
| 1657 | |
| 1658 | kvm_for_each_vcpu(i, src_vcpu, src) { |
| 1659 | src_svm = to_svm(src_vcpu); |
| 1660 | dst_vcpu = kvm_get_vcpu(dst, i); |
| 1661 | dst_svm = to_svm(dst_vcpu); |
| 1662 | |
| 1663 | /* |
| 1664 | * Transfer VMSA and GHCB state to the destination. Nullify and |
| 1665 | * clear source fields as appropriate; the state now belongs to
| 1666 | * the destination. |
| 1667 | */ |
| 1668 | memcpy(&dst_svm->sev_es, &src_svm->sev_es, sizeof(src_svm->sev_es)); |
| 1669 | dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa; |
| 1670 | dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa; |
| 1671 | dst_vcpu->arch.guest_state_protected = true; |
| 1672 | |
| 1673 | memset(&src_svm->sev_es, 0, sizeof(src_svm->sev_es)); |
| 1674 | src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE; |
| 1675 | src_svm->vmcb->control.vmsa_pa = INVALID_PAGE; |
| 1676 | src_vcpu->arch.guest_state_protected = false; |
| 1677 | } |
| 1678 | to_kvm_svm(src)->sev_info.es_active = false; |
| 1679 | to_kvm_svm(dst)->sev_info.es_active = true; |
| 1680 | |
| 1681 | return 0; |
| 1682 | } |
| 1683 | |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1684 | int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd) |
| 1685 | { |
| 1686 | struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info; |
Paolo Bonzini | 501cfe0 | 2021-11-12 04:02:24 -0500 | [diff] [blame] | 1687 | struct kvm_sev_info *src_sev, *cg_cleanup_sev; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1688 | struct file *source_kvm_file; |
| 1689 | struct kvm *source_kvm; |
Paolo Bonzini | 501cfe0 | 2021-11-12 04:02:24 -0500 | [diff] [blame] | 1690 | bool charged = false; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1691 | int ret; |
| 1692 | |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1693 | source_kvm_file = fget(source_fd); |
| 1694 | if (!file_is_kvm(source_kvm_file)) { |
| 1695 | ret = -EBADF; |
| 1696 | goto out_fput; |
| 1697 | } |
| 1698 | |
| 1699 | source_kvm = source_kvm_file->private_data; |
Paolo Bonzini | 501b580 | 2021-11-22 19:50:29 -0500 | [diff] [blame] | 1700 | ret = sev_lock_two_vms(kvm, source_kvm); |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1701 | if (ret) |
| 1702 | goto out_fput; |
| 1703 | |
Paolo Bonzini | 501b580 | 2021-11-22 19:50:29 -0500 | [diff] [blame] | 1704 | if (sev_guest(kvm) || !sev_guest(source_kvm)) { |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1705 | ret = -EINVAL; |
Paolo Bonzini | 501b580 | 2021-11-22 19:50:29 -0500 | [diff] [blame] | 1706 | goto out_unlock; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1707 | } |
| 1708 | |
| 1709 | src_sev = &to_kvm_svm(source_kvm)->sev_info; |
Paolo Bonzini | 17d44a9 | 2021-11-22 19:50:34 -0500 | [diff] [blame] | 1710 | |
| 1711 | /* |
| 1712 | * VMs mirroring src's encryption context rely on it to keep the |
| 1713 | * ASID allocated, but below we are clearing src_sev->asid. |
| 1714 | */ |
| 1715 | if (src_sev->num_mirrored_vms) { |
| 1716 | ret = -EBUSY; |
| 1717 | goto out_unlock; |
| 1718 | } |
| 1719 | |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1720 | dst_sev->misc_cg = get_current_misc_cg(); |
Paolo Bonzini | 501cfe0 | 2021-11-12 04:02:24 -0500 | [diff] [blame] | 1721 | cg_cleanup_sev = dst_sev; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1722 | if (dst_sev->misc_cg != src_sev->misc_cg) { |
| 1723 | ret = sev_misc_cg_try_charge(dst_sev); |
| 1724 | if (ret) |
Paolo Bonzini | 501cfe0 | 2021-11-12 04:02:24 -0500 | [diff] [blame] | 1725 | goto out_dst_cgroup; |
| 1726 | charged = true; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1727 | } |
| 1728 | |
| 1729 | ret = sev_lock_vcpus_for_migration(kvm); |
| 1730 | if (ret) |
| 1731 | goto out_dst_cgroup; |
| 1732 | ret = sev_lock_vcpus_for_migration(source_kvm); |
| 1733 | if (ret) |
| 1734 | goto out_dst_vcpu; |
| 1735 | |
Peter Gonda | 0b020f5 | 2021-10-21 10:43:01 -0700 | [diff] [blame] | 1736 | if (sev_es_guest(source_kvm)) { |
| 1737 | ret = sev_es_migrate_from(kvm, source_kvm); |
| 1738 | if (ret) |
| 1739 | goto out_source_vcpu; |
| 1740 | } |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1741 | sev_migrate_from(dst_sev, src_sev); |
| 1742 | kvm_vm_dead(source_kvm); |
Paolo Bonzini | 501cfe0 | 2021-11-12 04:02:24 -0500 | [diff] [blame] | 1743 | cg_cleanup_sev = src_sev; |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1744 | ret = 0; |
| 1745 | |
Peter Gonda | 0b020f5 | 2021-10-21 10:43:01 -0700 | [diff] [blame] | 1746 | out_source_vcpu: |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1747 | sev_unlock_vcpus_for_migration(source_kvm); |
| 1748 | out_dst_vcpu: |
| 1749 | sev_unlock_vcpus_for_migration(kvm); |
| 1750 | out_dst_cgroup: |
Paolo Bonzini | 501cfe0 | 2021-11-12 04:02:24 -0500 | [diff] [blame] | 1751 | /* Operates on the source on success, on the destination on failure. */ |
| 1752 | if (charged) |
| 1753 | sev_misc_cg_uncharge(cg_cleanup_sev); |
| 1754 | put_misc_cg(cg_cleanup_sev->misc_cg); |
| 1755 | cg_cleanup_sev->misc_cg = NULL; |
Paolo Bonzini | 501b580 | 2021-11-22 19:50:29 -0500 | [diff] [blame] | 1756 | out_unlock: |
| 1757 | sev_unlock_two_vms(kvm, source_kvm); |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1758 | out_fput: |
| 1759 | if (source_kvm_file) |
| 1760 | fput(source_kvm_file); |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 1761 | return ret; |
| 1762 | } |
| 1763 | |
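/*
 * Illustrative sketch (not part of this file): svm_vm_migrate_from() above
 * is reached via KVM_ENABLE_CAP on the destination VM, with the source VM's
 * fd in args[0].  The fd names and helper are assumptions; the capability is
 * the KVM UAPI KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM.
 */
#if 0
static int migrate_enc_context(int dst_vm_fd, int src_vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap  = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
		.args = { src_vm_fd },
	};

	return ioctl(dst_vm_fd, KVM_ENABLE_CAP, &cap);
}
#endif
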
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1764 | int svm_mem_enc_op(struct kvm *kvm, void __user *argp) |
| 1765 | { |
| 1766 | struct kvm_sev_cmd sev_cmd; |
| 1767 | int r; |
| 1768 | |
Sean Christopherson | a5c1c5a | 2021-04-21 19:11:23 -0700 | [diff] [blame] | 1769 | if (!sev_enabled) |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1770 | return -ENOTTY; |
| 1771 | |
| 1772 | if (!argp) |
| 1773 | return 0; |
| 1774 | |
| 1775 | if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd))) |
| 1776 | return -EFAULT; |
| 1777 | |
| 1778 | mutex_lock(&kvm->lock); |
| 1779 | |
Peter Gonda | 5b92b6c | 2021-09-21 08:03:45 -0700 | [diff] [blame] | 1780 | /* Mirror VMs may only issue the subset of memory encryption ops allowed below. */
| 1781 | if (is_mirroring_enc_context(kvm) && |
Sean Christopherson | 8e38e96 | 2021-11-09 21:51:01 +0000 | [diff] [blame] | 1782 | !is_cmd_allowed_from_mirror(sev_cmd.id)) { |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 1783 | r = -EINVAL; |
| 1784 | goto out; |
| 1785 | } |
| 1786 | |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1787 | switch (sev_cmd.id) { |
Sean Christopherson | 9fa1521 | 2021-03-30 20:19:35 -0700 | [diff] [blame] | 1788 | case KVM_SEV_ES_INIT: |
Sean Christopherson | 8d364a0 | 2021-04-21 19:11:17 -0700 | [diff] [blame] | 1789 | if (!sev_es_enabled) { |
Sean Christopherson | 9fa1521 | 2021-03-30 20:19:35 -0700 | [diff] [blame] | 1790 | r = -ENOTTY; |
| 1791 | goto out; |
| 1792 | } |
| 1793 | fallthrough; |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1794 | case KVM_SEV_INIT: |
| 1795 | r = sev_guest_init(kvm, &sev_cmd); |
| 1796 | break; |
| 1797 | case KVM_SEV_LAUNCH_START: |
| 1798 | r = sev_launch_start(kvm, &sev_cmd); |
| 1799 | break; |
| 1800 | case KVM_SEV_LAUNCH_UPDATE_DATA: |
| 1801 | r = sev_launch_update_data(kvm, &sev_cmd); |
| 1802 | break; |
Tom Lendacky | ad73109 | 2020-12-10 11:10:09 -0600 | [diff] [blame] | 1803 | case KVM_SEV_LAUNCH_UPDATE_VMSA: |
| 1804 | r = sev_launch_update_vmsa(kvm, &sev_cmd); |
| 1805 | break; |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1806 | case KVM_SEV_LAUNCH_MEASURE: |
| 1807 | r = sev_launch_measure(kvm, &sev_cmd); |
| 1808 | break; |
| 1809 | case KVM_SEV_LAUNCH_FINISH: |
| 1810 | r = sev_launch_finish(kvm, &sev_cmd); |
| 1811 | break; |
| 1812 | case KVM_SEV_GUEST_STATUS: |
| 1813 | r = sev_guest_status(kvm, &sev_cmd); |
| 1814 | break; |
| 1815 | case KVM_SEV_DBG_DECRYPT: |
| 1816 | r = sev_dbg_crypt(kvm, &sev_cmd, true); |
| 1817 | break; |
| 1818 | case KVM_SEV_DBG_ENCRYPT: |
| 1819 | r = sev_dbg_crypt(kvm, &sev_cmd, false); |
| 1820 | break; |
| 1821 | case KVM_SEV_LAUNCH_SECRET: |
| 1822 | r = sev_launch_secret(kvm, &sev_cmd); |
| 1823 | break; |
Brijesh Singh | 2c07ded | 2021-01-04 09:17:49 -0600 | [diff] [blame] | 1824 | case KVM_SEV_GET_ATTESTATION_REPORT: |
| 1825 | r = sev_get_attestation_report(kvm, &sev_cmd); |
| 1826 | break; |
Brijesh Singh | 4cfdd47 | 2021-04-15 15:53:14 +0000 | [diff] [blame] | 1827 | case KVM_SEV_SEND_START: |
| 1828 | r = sev_send_start(kvm, &sev_cmd); |
| 1829 | break; |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 1830 | case KVM_SEV_SEND_UPDATE_DATA: |
| 1831 | r = sev_send_update_data(kvm, &sev_cmd); |
| 1832 | break; |
Brijesh Singh | fddecf6 | 2021-04-15 15:54:15 +0000 | [diff] [blame] | 1833 | case KVM_SEV_SEND_FINISH: |
| 1834 | r = sev_send_finish(kvm, &sev_cmd); |
| 1835 | break; |
Steve Rutherford | 5569e2e | 2021-04-20 05:01:20 -0400 | [diff] [blame] | 1836 | case KVM_SEV_SEND_CANCEL: |
| 1837 | r = sev_send_cancel(kvm, &sev_cmd); |
| 1838 | break; |
Brijesh Singh | af43cbb | 2021-04-15 15:54:50 +0000 | [diff] [blame] | 1839 | case KVM_SEV_RECEIVE_START: |
| 1840 | r = sev_receive_start(kvm, &sev_cmd); |
| 1841 | break; |
Brijesh Singh | 15fb7de | 2021-04-15 15:55:17 +0000 | [diff] [blame] | 1842 | case KVM_SEV_RECEIVE_UPDATE_DATA: |
| 1843 | r = sev_receive_update_data(kvm, &sev_cmd); |
| 1844 | break; |
Brijesh Singh | 6a443de | 2021-04-15 15:55:40 +0000 | [diff] [blame] | 1845 | case KVM_SEV_RECEIVE_FINISH: |
| 1846 | r = sev_receive_finish(kvm, &sev_cmd); |
| 1847 | break; |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1848 | default: |
| 1849 | r = -EINVAL; |
| 1850 | goto out; |
| 1851 | } |
| 1852 | |
| 1853 | if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd))) |
| 1854 | r = -EFAULT; |
| 1855 | |
| 1856 | out: |
| 1857 | mutex_unlock(&kvm->lock); |
| 1858 | return r; |
| 1859 | } |
| 1860 | |
| 1861 | int svm_register_enc_region(struct kvm *kvm, |
| 1862 | struct kvm_enc_region *range) |
| 1863 | { |
| 1864 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
| 1865 | struct enc_region *region; |
| 1866 | int ret = 0; |
| 1867 | |
| 1868 | if (!sev_guest(kvm)) |
| 1869 | return -ENOTTY; |
| 1870 | |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 1871 | /* If kvm is mirroring an encryption context, it isn't responsible for it */
| 1872 | if (is_mirroring_enc_context(kvm)) |
| 1873 | return -EINVAL; |
| 1874 | |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1875 | if (range->addr > ULONG_MAX || range->size > ULONG_MAX) |
| 1876 | return -EINVAL; |
| 1877 | |
| 1878 | region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT); |
| 1879 | if (!region) |
| 1880 | return -ENOMEM; |
| 1881 | |
Peter Gonda | 19a23da | 2021-01-27 08:15:24 -0800 | [diff] [blame] | 1882 | mutex_lock(&kvm->lock); |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1883 | region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
Paolo Bonzini | a8d908b | 2020-06-23 05:12:24 -0400 | [diff] [blame] | 1884 | if (IS_ERR(region->pages)) { |
| 1885 | ret = PTR_ERR(region->pages); |
Peter Gonda | 19a23da | 2021-01-27 08:15:24 -0800 | [diff] [blame] | 1886 | mutex_unlock(&kvm->lock); |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1887 | goto e_free; |
| 1888 | } |
| 1889 | |
Peter Gonda | 19a23da | 2021-01-27 08:15:24 -0800 | [diff] [blame] | 1890 | region->uaddr = range->addr; |
| 1891 | region->size = range->size; |
| 1892 | |
| 1893 | list_add_tail(&region->list, &sev->regions_list);
| 1894 | mutex_unlock(&kvm->lock); |
| 1895 | |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1896 | /* |
| 1897 | * The guest may change the memory encryption attribute from C=0 -> C=1 |
| 1898 | * or vice versa for this memory range. Let's make sure the caches are
| 1899 | * flushed to ensure that guest data gets written into memory with the
| 1900 | * correct C-bit.
| 1901 | */ |
| 1902 | sev_clflush_pages(region->pages, region->npages); |
| 1903 | |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1904 | return ret; |
| 1905 | |
| 1906 | e_free: |
| 1907 | kfree(region); |
| 1908 | return ret; |
| 1909 | } |
| 1910 | |
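/*
 * Illustrative sketch (not part of this file): VMMs call the handler above
 * once per guest RAM block so its pages stay pinned for the VM's lifetime.
 * The ram_hva/ram_size values and helper name are assumptions; the ioctls
 * and struct kvm_enc_region are the KVM UAPI.
 */
#if 0
static int register_ram_block(int vm_fd, void *ram_hva, uint64_t ram_size)
{
	struct kvm_enc_region region = {
		.addr = (uint64_t)(uintptr_t)ram_hva,
		.size = ram_size,
	};

	/* Paired with KVM_MEMORY_ENCRYPT_UNREG_REGION on teardown. */
	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &region);
}
#endif
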
| 1911 | static struct enc_region * |
| 1912 | find_enc_region(struct kvm *kvm, struct kvm_enc_region *range) |
| 1913 | { |
| 1914 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
| 1915 | struct list_head *head = &sev->regions_list; |
| 1916 | struct enc_region *i; |
| 1917 | |
| 1918 | list_for_each_entry(i, head, list) { |
| 1919 | if (i->uaddr == range->addr && |
| 1920 | i->size == range->size) |
| 1921 | return i; |
| 1922 | } |
| 1923 | |
| 1924 | return NULL; |
| 1925 | } |
| 1926 | |
| 1927 | static void __unregister_enc_region_locked(struct kvm *kvm, |
| 1928 | struct enc_region *region) |
| 1929 | { |
| 1930 | sev_unpin_memory(kvm, region->pages, region->npages); |
| 1931 | list_del(&region->list);
| 1932 | kfree(region); |
| 1933 | } |
| 1934 | |
| 1935 | int svm_unregister_enc_region(struct kvm *kvm, |
| 1936 | struct kvm_enc_region *range) |
| 1937 | { |
| 1938 | struct enc_region *region; |
| 1939 | int ret; |
| 1940 | |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 1941 | /* If kvm is mirroring an encryption context, it isn't responsible for it */
| 1942 | if (is_mirroring_enc_context(kvm)) |
| 1943 | return -EINVAL; |
| 1944 | |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 1945 | mutex_lock(&kvm->lock); |
| 1946 | |
| 1947 | if (!sev_guest(kvm)) { |
| 1948 | ret = -ENOTTY; |
| 1949 | goto failed; |
| 1950 | } |
| 1951 | |
| 1952 | region = find_enc_region(kvm, range); |
| 1953 | if (!region) { |
| 1954 | ret = -EINVAL; |
| 1955 | goto failed; |
| 1956 | } |
| 1957 | |
| 1958 | /* |
| 1959 | * Ensure that all guest tagged cache entries are flushed before |
| 1960 | * releasing the pages back to the system for use. CLFLUSH will |
| 1961 | * not do this, so issue a WBINVD. |
| 1962 | */ |
| 1963 | wbinvd_on_all_cpus(); |
| 1964 | |
| 1965 | __unregister_enc_region_locked(kvm, region); |
| 1966 | |
| 1967 | mutex_unlock(&kvm->lock); |
| 1968 | return 0; |
| 1969 | |
| 1970 | failed: |
| 1971 | mutex_unlock(&kvm->lock); |
| 1972 | return ret; |
| 1973 | } |
| 1974 | |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 1975 | int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd) |
| 1976 | { |
| 1977 | struct file *source_kvm_file; |
| 1978 | struct kvm *source_kvm; |
Paolo Bonzini | bf42b02 | 2021-11-22 19:50:33 -0500 | [diff] [blame] | 1979 | struct kvm_sev_info *source_sev, *mirror_sev; |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 1980 | int ret; |
| 1981 | |
| 1982 | source_kvm_file = fget(source_fd); |
| 1983 | if (!file_is_kvm(source_kvm_file)) { |
| 1984 | ret = -EBADF; |
Paolo Bonzini | bf42b02 | 2021-11-22 19:50:33 -0500 | [diff] [blame] | 1985 | goto e_source_fput; |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 1986 | } |
| 1987 | |
| 1988 | source_kvm = source_kvm_file->private_data; |
Paolo Bonzini | bf42b02 | 2021-11-22 19:50:33 -0500 | [diff] [blame] | 1989 | ret = sev_lock_two_vms(kvm, source_kvm); |
| 1990 | if (ret) |
| 1991 | goto e_source_fput; |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 1992 | |
Paolo Bonzini | bf42b02 | 2021-11-22 19:50:33 -0500 | [diff] [blame] | 1993 | /* |
| 1994 | * Mirrors of mirrors should work, but let's not get silly. Also |
| 1995 | * disallow out-of-band SEV/SEV-ES init if the target is already an |
| 1996 | * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being |
| 1997 | * created after SEV/SEV-ES initialization, e.g. to init intercepts. |
| 1998 | */ |
| 1999 | if (sev_guest(kvm) || !sev_guest(source_kvm) || |
| 2000 | is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) { |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 2001 | ret = -EINVAL; |
Paolo Bonzini | bf42b02 | 2021-11-22 19:50:33 -0500 | [diff] [blame] | 2002 | goto e_unlock; |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 2003 | } |
| 2004 | |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 2005 | /* |
| 2006 | * The mirror kvm holds an enc_context_owner ref so its asid can't |
| 2007 | * disappear until we're done with it.
| 2008 | */ |
Paolo Bonzini | bf42b02 | 2021-11-22 19:50:33 -0500 | [diff] [blame] | 2009 | source_sev = &to_kvm_svm(source_kvm)->sev_info; |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 2010 | kvm_get_kvm(source_kvm); |
Paolo Bonzini | 17d44a9 | 2021-11-22 19:50:34 -0500 | [diff] [blame] | 2011 | source_sev->num_mirrored_vms++; |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 2012 | |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 2013 | /* Set enc_context_owner and copy its encryption context over */ |
| 2014 | mirror_sev = &to_kvm_svm(kvm)->sev_info; |
| 2015 | mirror_sev->enc_context_owner = source_kvm; |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 2016 | mirror_sev->active = true; |
Paolo Bonzini | bf42b02 | 2021-11-22 19:50:33 -0500 | [diff] [blame] | 2017 | mirror_sev->asid = source_sev->asid; |
| 2018 | mirror_sev->fd = source_sev->fd; |
| 2019 | mirror_sev->es_active = source_sev->es_active; |
| 2020 | mirror_sev->handle = source_sev->handle; |
Paolo Bonzini | 2b347a3 | 2021-11-22 19:50:30 -0500 | [diff] [blame] | 2021 | INIT_LIST_HEAD(&mirror_sev->regions_list); |
Paolo Bonzini | bf42b02 | 2021-11-22 19:50:33 -0500 | [diff] [blame] | 2022 | ret = 0; |
| 2023 | |
Peter Gonda | f43c887 | 2021-09-21 08:03:44 -0700 | [diff] [blame] | 2024 | /* |
| 2025 | * Do not copy ap_jump_table, since the mirror does not share the same
| 2026 | * KVM context as the original and the two may have different
| 2027 | * memory views.
| 2028 | */ |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 2029 | |
Paolo Bonzini | bf42b02 | 2021-11-22 19:50:33 -0500 | [diff] [blame] | 2030 | e_unlock: |
| 2031 | sev_unlock_two_vms(kvm, source_kvm); |
| 2032 | e_source_fput: |
Colin Ian King | 8899a5f | 2021-04-30 18:03:03 +0100 | [diff] [blame] | 2033 | if (source_kvm_file) |
| 2034 | fput(source_kvm_file); |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 2035 | return ret; |
| 2036 | } |
| 2037 | |
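/*
 * Illustrative sketch (not part of this file): mirrors are created the same
 * way as the intra-host migration sketch earlier, but with
 * KVM_CAP_VM_COPY_ENC_CONTEXT_FROM and before the mirror VM creates any
 * vCPUs.  The fd names and helper are assumptions.
 */
#if 0
static int mirror_enc_context(int mirror_vm_fd, int sev_vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap  = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
		.args = { sev_vm_fd },
	};

	return ioctl(mirror_vm_fd, KVM_ENABLE_CAP, &cap);
}
#endif
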
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2038 | void sev_vm_destroy(struct kvm *kvm) |
| 2039 | { |
| 2040 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
| 2041 | struct list_head *head = &sev->regions_list; |
| 2042 | struct list_head *pos, *q; |
| 2043 | |
Paolo Bonzini | 17d44a9 | 2021-11-22 19:50:34 -0500 | [diff] [blame] | 2044 | WARN_ON(sev->num_mirrored_vms); |
| 2045 | |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2046 | if (!sev_guest(kvm)) |
| 2047 | return; |
| 2048 | |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 2049 | /* If this is a mirror VM, release the enc_context_owner and skip SEV cleanup */
| 2050 | if (is_mirroring_enc_context(kvm)) { |
Paolo Bonzini | 17d44a9 | 2021-11-22 19:50:34 -0500 | [diff] [blame] | 2051 | struct kvm *owner_kvm = sev->enc_context_owner; |
| 2052 | struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info; |
| 2053 | |
| 2054 | mutex_lock(&owner_kvm->lock); |
| 2055 | if (!WARN_ON(!owner_sev->num_mirrored_vms)) |
| 2056 | owner_sev->num_mirrored_vms--; |
| 2057 | mutex_unlock(&owner_kvm->lock); |
| 2058 | kvm_put_kvm(owner_kvm); |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 2059 | return; |
| 2060 | } |
| 2061 | |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2062 | /* |
| 2063 | * Ensure that all guest tagged cache entries are flushed before |
| 2064 | * releasing the pages back to the system for use. CLFLUSH will |
| 2065 | * not do this, so issue a WBINVD. |
| 2066 | */ |
| 2067 | wbinvd_on_all_cpus(); |
| 2068 | |
| 2069 | /* |
| 2070 | * If userspace was terminated before unregistering the memory regions,
| 2071 | * then let's unpin all the registered memory.
| 2072 | */ |
| 2073 | if (!list_empty(head)) { |
| 2074 | list_for_each_safe(pos, q, head) { |
| 2075 | __unregister_enc_region_locked(kvm, |
| 2076 | list_entry(pos, struct enc_region, list)); |
David Rientjes | 7be7494 | 2020-08-25 12:56:28 -0700 | [diff] [blame] | 2077 | cond_resched(); |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2078 | } |
| 2079 | } |
| 2080 | |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2081 | sev_unbind_asid(kvm, sev->handle); |
Vipin Sharma | 7aef27f | 2021-03-29 21:42:06 -0700 | [diff] [blame] | 2082 | sev_asid_free(sev); |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2083 | } |
| 2084 | |
Paolo Bonzini | d9db0fd | 2021-04-21 19:11:15 -0700 | [diff] [blame] | 2085 | void __init sev_set_cpu_caps(void) |
| 2086 | { |
Sean Christopherson | 8d364a0 | 2021-04-21 19:11:17 -0700 | [diff] [blame] | 2087 | if (!sev_enabled) |
Paolo Bonzini | d9db0fd | 2021-04-21 19:11:15 -0700 | [diff] [blame] | 2088 | kvm_cpu_cap_clear(X86_FEATURE_SEV); |
Sean Christopherson | 8d364a0 | 2021-04-21 19:11:17 -0700 | [diff] [blame] | 2089 | if (!sev_es_enabled) |
Paolo Bonzini | d9db0fd | 2021-04-21 19:11:15 -0700 | [diff] [blame] | 2090 | kvm_cpu_cap_clear(X86_FEATURE_SEV_ES); |
| 2091 | } |
| 2092 | |
Tom Lendacky | 916391a | 2020-12-10 11:09:38 -0600 | [diff] [blame] | 2093 | void __init sev_hardware_setup(void) |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2094 | { |
Sean Christopherson | a479c33 | 2021-04-21 19:11:18 -0700 | [diff] [blame] | 2095 | #ifdef CONFIG_KVM_AMD_SEV |
Vipin Sharma | 7aef27f | 2021-03-29 21:42:06 -0700 | [diff] [blame] | 2096 | unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count; |
Tom Lendacky | 916391a | 2020-12-10 11:09:38 -0600 | [diff] [blame] | 2097 | bool sev_es_supported = false; |
| 2098 | bool sev_supported = false; |
| 2099 | |
Sean Christopherson | a479c33 | 2021-04-21 19:11:18 -0700 | [diff] [blame] | 2100 | if (!sev_enabled || !npt_enabled) |
Sean Christopherson | e8126bd | 2021-04-21 19:11:14 -0700 | [diff] [blame] | 2101 | goto out; |
| 2102 | |
Sean Christopherson | c532f29 | 2022-01-20 01:07:14 +0000 | [diff] [blame] | 2103 | /* |
| 2104 | * SEV must obviously be supported in hardware. Sanity check that the |
| 2105 | * CPU supports decode assists, which is mandatory for SEV guests to |
| 2106 | * support instruction emulation. |
| 2107 | */ |
| 2108 | if (!boot_cpu_has(X86_FEATURE_SEV) || |
| 2109 | WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_DECODEASSISTS))) |
Tom Lendacky | 916391a | 2020-12-10 11:09:38 -0600 | [diff] [blame] | 2110 | goto out; |
| 2111 | |
| 2112 | /* Retrieve SEV CPUID information */ |
| 2113 | cpuid(0x8000001f, &eax, &ebx, &ecx, &edx); |
| 2114 | |
Tom Lendacky | 1edc145 | 2020-12-10 11:09:49 -0600 | [diff] [blame] | 2115 | /* Set encryption bit location for SEV-ES guests */ |
| 2116 | sev_enc_bit = ebx & 0x3f; |
| 2117 | |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2118 | /* Maximum number of encrypted guests supported simultaneously */ |
Tom Lendacky | 916391a | 2020-12-10 11:09:38 -0600 | [diff] [blame] | 2119 | max_sev_asid = ecx; |
Sean Christopherson | 8cb756b | 2021-04-21 19:11:21 -0700 | [diff] [blame] | 2120 | if (!max_sev_asid) |
Tom Lendacky | 916391a | 2020-12-10 11:09:38 -0600 | [diff] [blame] | 2121 | goto out; |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2122 | |
| 2123 | /* Minimum ASID value that should be used for an SEV guest */
Tom Lendacky | 916391a | 2020-12-10 11:09:38 -0600 | [diff] [blame] | 2124 | min_sev_asid = edx; |
Brijesh Singh | d3d1af8 | 2021-04-15 15:53:55 +0000 | [diff] [blame] | 2125 | sev_me_mask = 1UL << (ebx & 0x3f); |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2126 | |
Mingwei Zhang | bb2baeb | 2021-08-02 11:09:03 -0700 | [diff] [blame] | 2127 | /* |
| 2128 | * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap, |
| 2129 | * even though it's never used, so that the bitmap is indexed by the |
| 2130 | * actual ASID. |
| 2131 | */ |
| 2132 | nr_asids = max_sev_asid + 1; |
| 2133 | sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL); |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2134 | if (!sev_asid_bitmap) |
Tom Lendacky | 916391a | 2020-12-10 11:09:38 -0600 | [diff] [blame] | 2135 | goto out; |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2136 | |
Mingwei Zhang | bb2baeb | 2021-08-02 11:09:03 -0700 | [diff] [blame] | 2137 | sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL); |
Sean Christopherson | f31b88b | 2021-04-21 19:11:12 -0700 | [diff] [blame] | 2138 | if (!sev_reclaim_asid_bitmap) { |
| 2139 | bitmap_free(sev_asid_bitmap); |
| 2140 | sev_asid_bitmap = NULL; |
Tom Lendacky | 916391a | 2020-12-10 11:09:38 -0600 | [diff] [blame] | 2141 | goto out; |
Sean Christopherson | f31b88b | 2021-04-21 19:11:12 -0700 | [diff] [blame] | 2142 | } |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2143 | |
Vipin Sharma | 7aef27f | 2021-03-29 21:42:06 -0700 | [diff] [blame] | 2144 | sev_asid_count = max_sev_asid - min_sev_asid + 1; |
| 2145 | if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count)) |
| 2146 | goto out; |
| 2147 | |
| 2148 | pr_info("SEV supported: %u ASIDs\n", sev_asid_count); |
Tom Lendacky | 916391a | 2020-12-10 11:09:38 -0600 | [diff] [blame] | 2149 | sev_supported = true; |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2150 | |
Tom Lendacky | 916391a | 2020-12-10 11:09:38 -0600 | [diff] [blame] | 2151 | /* SEV-ES support requested? */ |
Sean Christopherson | 8d364a0 | 2021-04-21 19:11:17 -0700 | [diff] [blame] | 2152 | if (!sev_es_enabled) |
Tom Lendacky | 916391a | 2020-12-10 11:09:38 -0600 | [diff] [blame] | 2153 | goto out; |
| 2154 | |
| 2155 | /* Does the CPU support SEV-ES? */ |
| 2156 | if (!boot_cpu_has(X86_FEATURE_SEV_ES)) |
| 2157 | goto out; |
| 2158 | |
| 2159 | /* Has the system been allocated ASIDs for SEV-ES? */ |
| 2160 | if (min_sev_asid == 1) |
| 2161 | goto out; |
| 2162 | |
Vipin Sharma | 7aef27f | 2021-03-29 21:42:06 -0700 | [diff] [blame] | 2163 | sev_es_asid_count = min_sev_asid - 1; |
| 2164 | if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count)) |
| 2165 | goto out; |
| 2166 | |
| 2167 | pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count); |
Tom Lendacky | 916391a | 2020-12-10 11:09:38 -0600 | [diff] [blame] | 2168 | sev_es_supported = true; |
| 2169 | |
| 2170 | out: |
Sean Christopherson | 8d364a0 | 2021-04-21 19:11:17 -0700 | [diff] [blame] | 2171 | sev_enabled = sev_supported; |
| 2172 | sev_es_enabled = sev_es_supported; |
Sean Christopherson | a479c33 | 2021-04-21 19:11:18 -0700 | [diff] [blame] | 2173 | #endif |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2174 | } |
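/*
 * For illustration, the checks above partition the SEV ASID space as
 * follows (ECX/EDX come from CPUID 0x8000001f):
 *
 *   ASID 0                             - never handed to a guest; space is
 *                                        allocated in the bitmaps only so
 *                                        they can be indexed by raw ASID
 *   ASIDs 1 .. min_sev_asid - 1        - usable by SEV-ES guests
 *   ASIDs min_sev_asid .. max_sev_asid - usable by plain SEV guests
 *
 * which is why "min_sev_asid == 1" means the system has no SEV-ES
 * capacity at all.
 */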
| 2175 | |
| 2176 | void sev_hardware_teardown(void) |
| 2177 | { |
Sean Christopherson | a5c1c5a | 2021-04-21 19:11:23 -0700 | [diff] [blame] | 2178 | if (!sev_enabled) |
Paolo Bonzini | 9ef1530 | 2020-04-13 03:20:06 -0400 | [diff] [blame] | 2179 | return; |
| 2180 | |
Sean Christopherson | 469bb32 | 2021-04-21 19:11:25 -0700 | [diff] [blame] | 2181 | /* No need to take sev_bitmap_lock, all VMs have been destroyed. */ |
Mingwei Zhang | bb2baeb | 2021-08-02 11:09:03 -0700 | [diff] [blame] | 2182 | sev_flush_asids(1, max_sev_asid); |
Sean Christopherson | 469bb32 | 2021-04-21 19:11:25 -0700 | [diff] [blame] | 2183 | |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2184 | bitmap_free(sev_asid_bitmap); |
| 2185 | bitmap_free(sev_reclaim_asid_bitmap); |
Sean Christopherson | 469bb32 | 2021-04-21 19:11:25 -0700 | [diff] [blame] | 2186 | |
Vipin Sharma | 7aef27f | 2021-03-29 21:42:06 -0700 | [diff] [blame] | 2187 | misc_cg_set_capacity(MISC_CG_RES_SEV, 0); |
| 2188 | misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0); |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2189 | } |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2190 | |
Sean Christopherson | b95c221 | 2021-04-21 19:11:22 -0700 | [diff] [blame] | 2191 | int sev_cpu_init(struct svm_cpu_data *sd) |
| 2192 | { |
Sean Christopherson | a5c1c5a | 2021-04-21 19:11:23 -0700 | [diff] [blame] | 2193 | if (!sev_enabled) |
Sean Christopherson | b95c221 | 2021-04-21 19:11:22 -0700 | [diff] [blame] | 2194 | return 0; |
| 2195 | |
Mingwei Zhang | bb2baeb | 2021-08-02 11:09:03 -0700 | [diff] [blame] | 2196 | sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL); |
Sean Christopherson | b95c221 | 2021-04-21 19:11:22 -0700 | [diff] [blame] | 2197 | if (!sd->sev_vmcbs) |
| 2198 | return -ENOMEM; |
| 2199 | |
| 2200 | return 0; |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2201 | } |
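/*
 * sd->sev_vmcbs is a per-CPU array, indexed by ASID, caching the last
 * VMCB that ran with each ASID on this CPU; pre_sev_run() below uses it
 * to decide whether the guest TLB must be flushed before VMRUN.
 */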
| 2202 | |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 2203 | /* |
| 2204 | * Pages used by hardware to hold guest encrypted state must be flushed before |
| 2205 | * returning them to the system. |
| 2206 | */ |
| 2207 | static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va, |
| 2208 | unsigned long len) |
| 2209 | { |
| 2210 | /* |
| 2211 | * If hardware-enforced cache coherency for encrypted mappings of the
| 2212 | * same physical page is supported, there is nothing to do.
| 2213 | */ |
| 2214 | if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) |
| 2215 | return; |
| 2216 | |
| 2217 | /* |
| 2218 | * If the VM Page Flush MSR is supported, use it to flush the page |
| 2219 | * (using the page virtual address and the guest ASID). |
| 2220 | */ |
| 2221 | if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) { |
| 2222 | struct kvm_sev_info *sev; |
| 2223 | unsigned long va_start; |
| 2224 | u64 start, stop; |
| 2225 | |
| 2226 | /* Align start and stop to page boundaries. */ |
| 2227 | va_start = (unsigned long)va; |
| 2228 | start = (u64)va_start & PAGE_MASK; |
| 2229 | stop = PAGE_ALIGN((u64)va_start + len); |
| 2230 | |
| 2231 | if (start < stop) { |
| 2232 | sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info; |
| 2233 | |
| 2234 | while (start < stop) { |
| 2235 | wrmsrl(MSR_AMD64_VM_PAGE_FLUSH, |
| 2236 | start | sev->asid); |
| 2237 | |
| 2238 | start += PAGE_SIZE; |
| 2239 | } |
| 2240 | |
| 2241 | return; |
| 2242 | } |
| 2243 | |
| 2244 | WARN(1, "Address overflow, using WBINVD\n"); |
| 2245 | } |
| 2246 | |
| 2247 | /* |
| 2248 | * Hardware should always have one of the above features, |
| 2249 | * but if not, use WBINVD and issue a warning. |
| 2250 | */ |
| 2251 | WARN_ONCE(1, "Using WBINVD to flush guest memory\n"); |
| 2252 | wbinvd_on_all_cpus(); |
| 2253 | } |
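/*
 * A note on the MSR_AMD64_VM_PAGE_FLUSH encoding used above: the
 * page-aligned virtual address occupies bits 63:12 and the guest ASID
 * occupies bits 11:0, which is why "start | sev->asid" forms a valid
 * value once start has been masked with PAGE_MASK.
 */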
| 2254 | |
| 2255 | void sev_free_vcpu(struct kvm_vcpu *vcpu) |
| 2256 | { |
| 2257 | struct vcpu_svm *svm; |
| 2258 | |
| 2259 | if (!sev_es_guest(vcpu->kvm)) |
| 2260 | return; |
| 2261 | |
| 2262 | svm = to_svm(vcpu); |
| 2263 | |
| 2264 | if (vcpu->arch.guest_state_protected) |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2265 | sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE); |
| 2266 | __free_page(virt_to_page(svm->sev_es.vmsa)); |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2267 | |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2268 | if (svm->sev_es.ghcb_sa_free) |
Sean Christopherson | a655276 | 2021-11-09 22:23:50 +0000 | [diff] [blame] | 2269 | kvfree(svm->sev_es.ghcb_sa); |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 2270 | } |
| 2271 | |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2272 | static void dump_ghcb(struct vcpu_svm *svm) |
| 2273 | { |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2274 | struct ghcb *ghcb = svm->sev_es.ghcb; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2275 | unsigned int nbits; |
| 2276 | |
| 2277 | /* Re-use the dump_invalid_vmcb module parameter */ |
| 2278 | if (!dump_invalid_vmcb) { |
| 2279 | pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n"); |
| 2280 | return; |
| 2281 | } |
| 2282 | |
| 2283 | nbits = sizeof(ghcb->save.valid_bitmap) * 8; |
| 2284 | |
| 2285 | pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa); |
| 2286 | pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code", |
| 2287 | ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb)); |
| 2288 | pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1", |
| 2289 | ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb)); |
| 2290 | pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2", |
| 2291 | ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb)); |
| 2292 | pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch", |
| 2293 | ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb)); |
| 2294 | pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap); |
| 2295 | } |
| 2296 | |
| 2297 | static void sev_es_sync_to_ghcb(struct vcpu_svm *svm) |
| 2298 | { |
| 2299 | struct kvm_vcpu *vcpu = &svm->vcpu; |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2300 | struct ghcb *ghcb = svm->sev_es.ghcb; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2301 | |
| 2302 | /* |
| 2303 | * The GHCB protocol so far allows for the following data |
| 2304 | * to be returned: |
| 2305 | * GPRs RAX, RBX, RCX, RDX |
| 2306 | * |
Sean Christopherson | 2500914 | 2021-01-22 15:50:47 -0800 | [diff] [blame] | 2307 | * Copy their values, even if they may not have been written during the |
| 2308 | * VM-Exit. It's the guest's responsibility to not consume random data. |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2309 | */ |
Sean Christopherson | 2500914 | 2021-01-22 15:50:47 -0800 | [diff] [blame] | 2310 | ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]); |
| 2311 | ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]); |
| 2312 | ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]); |
| 2313 | ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]); |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2314 | } |
| 2315 | |
| 2316 | static void sev_es_sync_from_ghcb(struct vcpu_svm *svm) |
| 2317 | { |
| 2318 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 2319 | struct kvm_vcpu *vcpu = &svm->vcpu; |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2320 | struct ghcb *ghcb = svm->sev_es.ghcb; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2321 | u64 exit_code; |
| 2322 | |
| 2323 | /* |
| 2324 | * The GHCB protocol so far allows for the following data |
| 2325 | * to be supplied: |
| 2326 | * GPRs RAX, RBX, RCX, RDX |
| 2327 | * XCR0 |
| 2328 | * CPL |
| 2329 | * |
| 2330 | * VMMCALL allows the guest to provide extra registers. KVM also |
| 2331 | * expects RSI for hypercalls, so include that, too. |
| 2332 | * |
| 2333 | * Copy their values to the appropriate location if supplied. |
| 2334 | */ |
| 2335 | memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); |
| 2336 | |
| 2337 | vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb); |
| 2338 | vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb); |
| 2339 | vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb); |
| 2340 | vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb); |
| 2341 | vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb); |
| 2342 | |
| 2343 | svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb); |
| 2344 | |
| 2345 | if (ghcb_xcr0_is_valid(ghcb)) { |
| 2346 | vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb); |
| 2347 | kvm_update_cpuid_runtime(vcpu); |
| 2348 | } |
| 2349 | |
| 2350 | /* Copy the GHCB exit information into the VMCB fields */ |
| 2351 | exit_code = ghcb_get_sw_exit_code(ghcb); |
| 2352 | control->exit_code = lower_32_bits(exit_code); |
| 2353 | control->exit_code_hi = upper_32_bits(exit_code); |
| 2354 | control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb); |
| 2355 | control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb); |
| 2356 | |
| 2357 | /* Clear the valid entries fields */ |
| 2358 | memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); |
| 2359 | } |
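/*
 * The ghcb_*_is_valid() and ghcb_get_*_if_valid() helpers used above are
 * macro-generated from the GHCB save area layout; roughly, each expands
 * to a sketch along these lines:
 *
 *	static inline u64 ghcb_get_rax_if_valid(struct ghcb *ghcb)
 *	{
 *		return test_bit(GHCB_BITMAP_IDX(rax),
 *				(unsigned long *)&ghcb->save.valid_bitmap) ?
 *			ghcb->save.rax : 0;
 *	}
 *
 * i.e. a field is consumed only if the guest set its bit in valid_bitmap.
 */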
| 2360 | |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2361 | static bool sev_es_validate_vmgexit(struct vcpu_svm *svm) |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2362 | { |
| 2363 | struct kvm_vcpu *vcpu; |
| 2364 | struct ghcb *ghcb; |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2365 | u64 exit_code; |
| 2366 | u64 reason; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2367 | |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2368 | ghcb = svm->sev_es.ghcb; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2369 | |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2370 | /* |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2371 | * Retrieve the exit code now, even though it may not be marked valid,
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2372 | * as it could help with debugging. |
| 2373 | */ |
| 2374 | exit_code = ghcb_get_sw_exit_code(ghcb); |
| 2375 | |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2376 | /* Only GHCB Usage code 0 is supported */ |
| 2377 | if (ghcb->ghcb_usage) { |
| 2378 | reason = GHCB_ERR_INVALID_USAGE; |
| 2379 | goto vmgexit_err; |
| 2380 | } |
| 2381 | |
| 2382 | reason = GHCB_ERR_MISSING_INPUT; |
| 2383 | |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2384 | if (!ghcb_sw_exit_code_is_valid(ghcb) || |
| 2385 | !ghcb_sw_exit_info_1_is_valid(ghcb) || |
| 2386 | !ghcb_sw_exit_info_2_is_valid(ghcb)) |
| 2387 | goto vmgexit_err; |
| 2388 | |
| 2389 | switch (ghcb_get_sw_exit_code(ghcb)) { |
| 2390 | case SVM_EXIT_READ_DR7: |
| 2391 | break; |
| 2392 | case SVM_EXIT_WRITE_DR7: |
| 2393 | if (!ghcb_rax_is_valid(ghcb)) |
| 2394 | goto vmgexit_err; |
| 2395 | break; |
| 2396 | case SVM_EXIT_RDTSC: |
| 2397 | break; |
| 2398 | case SVM_EXIT_RDPMC: |
| 2399 | if (!ghcb_rcx_is_valid(ghcb)) |
| 2400 | goto vmgexit_err; |
| 2401 | break; |
| 2402 | case SVM_EXIT_CPUID: |
| 2403 | if (!ghcb_rax_is_valid(ghcb) || |
| 2404 | !ghcb_rcx_is_valid(ghcb)) |
| 2405 | goto vmgexit_err; |
| 2406 | if (ghcb_get_rax(ghcb) == 0xd) |
| 2407 | if (!ghcb_xcr0_is_valid(ghcb)) |
| 2408 | goto vmgexit_err; |
| 2409 | break; |
| 2410 | case SVM_EXIT_INVD: |
| 2411 | break; |
| 2412 | case SVM_EXIT_IOIO: |
Tom Lendacky | 7ed9abf | 2020-12-10 11:09:54 -0600 | [diff] [blame] | 2413 | if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) { |
| 2414 | if (!ghcb_sw_scratch_is_valid(ghcb)) |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2415 | goto vmgexit_err; |
Tom Lendacky | 7ed9abf | 2020-12-10 11:09:54 -0600 | [diff] [blame] | 2416 | } else { |
| 2417 | if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK)) |
| 2418 | if (!ghcb_rax_is_valid(ghcb)) |
| 2419 | goto vmgexit_err; |
| 2420 | } |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2421 | break; |
| 2422 | case SVM_EXIT_MSR: |
| 2423 | if (!ghcb_rcx_is_valid(ghcb)) |
| 2424 | goto vmgexit_err; |
| 2425 | if (ghcb_get_sw_exit_info_1(ghcb)) { |
| 2426 | if (!ghcb_rax_is_valid(ghcb) || |
| 2427 | !ghcb_rdx_is_valid(ghcb)) |
| 2428 | goto vmgexit_err; |
| 2429 | } |
| 2430 | break; |
| 2431 | case SVM_EXIT_VMMCALL: |
| 2432 | if (!ghcb_rax_is_valid(ghcb) || |
| 2433 | !ghcb_cpl_is_valid(ghcb)) |
| 2434 | goto vmgexit_err; |
| 2435 | break; |
| 2436 | case SVM_EXIT_RDTSCP: |
| 2437 | break; |
| 2438 | case SVM_EXIT_WBINVD: |
| 2439 | break; |
| 2440 | case SVM_EXIT_MONITOR: |
| 2441 | if (!ghcb_rax_is_valid(ghcb) || |
| 2442 | !ghcb_rcx_is_valid(ghcb) || |
| 2443 | !ghcb_rdx_is_valid(ghcb)) |
| 2444 | goto vmgexit_err; |
| 2445 | break; |
| 2446 | case SVM_EXIT_MWAIT: |
| 2447 | if (!ghcb_rax_is_valid(ghcb) || |
| 2448 | !ghcb_rcx_is_valid(ghcb)) |
| 2449 | goto vmgexit_err; |
| 2450 | break; |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2451 | case SVM_VMGEXIT_MMIO_READ: |
| 2452 | case SVM_VMGEXIT_MMIO_WRITE: |
| 2453 | if (!ghcb_sw_scratch_is_valid(ghcb)) |
| 2454 | goto vmgexit_err; |
| 2455 | break; |
Tom Lendacky | 4444dfe | 2020-12-14 11:16:03 -0500 | [diff] [blame] | 2456 | case SVM_VMGEXIT_NMI_COMPLETE: |
Tom Lendacky | 647daca | 2021-01-04 14:20:01 -0600 | [diff] [blame] | 2457 | case SVM_VMGEXIT_AP_HLT_LOOP: |
Tom Lendacky | 8640ca5 | 2020-12-15 12:44:07 -0500 | [diff] [blame] | 2458 | case SVM_VMGEXIT_AP_JUMP_TABLE: |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2459 | case SVM_VMGEXIT_UNSUPPORTED_EVENT: |
| 2460 | break; |
| 2461 | default: |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2462 | reason = GHCB_ERR_INVALID_EVENT; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2463 | goto vmgexit_err; |
| 2464 | } |
| 2465 | |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2466 | return true; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2467 | |
| 2468 | vmgexit_err: |
| 2469 | vcpu = &svm->vcpu; |
| 2470 | |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2471 | if (reason == GHCB_ERR_INVALID_USAGE) { |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2472 | vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n", |
| 2473 | ghcb->ghcb_usage); |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2474 | } else if (reason == GHCB_ERR_INVALID_EVENT) { |
| 2475 | vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n", |
| 2476 | exit_code); |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2477 | } else { |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2478 | vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n", |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2479 | exit_code); |
| 2480 | dump_ghcb(svm); |
| 2481 | } |
| 2482 | |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2483 | /* Clear the valid entries fields */ |
| 2484 | memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2485 | |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2486 | ghcb_set_sw_exit_info_1(ghcb, 2); |
| 2487 | ghcb_set_sw_exit_info_2(ghcb, reason); |
| 2488 | |
| 2489 | return false; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2490 | } |
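/*
 * The failure path above follows the GHCB specification's error
 * protocol: SW_EXITINFO1 == 2 tells the guest the VMGEXIT request was
 * malformed, and SW_EXITINFO2 carries a GHCB_ERR_* reason code
 * identifying what was wrong.
 */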
| 2491 | |
Tom Lendacky | ce7ea0c | 2021-05-06 15:14:41 -0500 | [diff] [blame] | 2492 | void sev_es_unmap_ghcb(struct vcpu_svm *svm) |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2493 | { |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2494 | if (!svm->sev_es.ghcb) |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2495 | return; |
| 2496 | |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2497 | if (svm->sev_es.ghcb_sa_free) { |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2498 | /* |
| 2499 | * The scratch area lives outside the GHCB, so there is a |
| 2500 | * buffer that, depending on the operation performed, may |
| 2501 | * need to be synced, then freed. |
| 2502 | */ |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2503 | if (svm->sev_es.ghcb_sa_sync) { |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2504 | kvm_write_guest(svm->vcpu.kvm, |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2505 | ghcb_get_sw_scratch(svm->sev_es.ghcb), |
| 2506 | svm->sev_es.ghcb_sa, |
| 2507 | svm->sev_es.ghcb_sa_len); |
| 2508 | svm->sev_es.ghcb_sa_sync = false; |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2509 | } |
| 2510 | |
Sean Christopherson | a655276 | 2021-11-09 22:23:50 +0000 | [diff] [blame] | 2511 | kvfree(svm->sev_es.ghcb_sa); |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2512 | svm->sev_es.ghcb_sa = NULL; |
| 2513 | svm->sev_es.ghcb_sa_free = false; |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2514 | } |
| 2515 | |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2516 | trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb); |
Tom Lendacky | d523ab6b | 2020-12-10 11:09:48 -0600 | [diff] [blame] | 2517 | |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2518 | sev_es_sync_to_ghcb(svm); |
| 2519 | |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2520 | kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true); |
| 2521 | svm->sev_es.ghcb = NULL; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2522 | } |
| 2523 | |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2524 | void pre_sev_run(struct vcpu_svm *svm, int cpu) |
| 2525 | { |
| 2526 | struct svm_cpu_data *sd = per_cpu(svm_data, cpu); |
| 2527 | int asid = sev_get_asid(svm->vcpu.kvm); |
| 2528 | |
| 2529 | /* Assign the ASID allocated to this SEV guest */
Paolo Bonzini | dee734a | 2020-11-30 09:39:59 -0500 | [diff] [blame] | 2530 | svm->asid = asid; |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2531 | |
| 2532 | /* |
| 2533 | * Flush guest TLB: |
| 2534 | * |
| 2535 | * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
| 2536 | * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
| 2537 | */ |
| 2538 | if (sd->sev_vmcbs[asid] == svm->vmcb && |
Jim Mattson | 8a14fe4 | 2020-06-03 16:56:22 -0700 | [diff] [blame] | 2539 | svm->vcpu.arch.last_vmentry_cpu == cpu) |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2540 | return; |
| 2541 | |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2542 | sd->sev_vmcbs[asid] = svm->vmcb; |
| 2543 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 2544 | vmcb_mark_dirty(svm->vmcb, VMCB_ASID); |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 2545 | } |
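/*
 * TLB_CONTROL_FLUSH_ASID asks hardware to flush only this guest ASID's
 * TLB entries on the next VMRUN, rather than a full flush
 * (TLB_CONTROL_FLUSH_ALL_ASID); marking VMCB_ASID dirty clears the
 * corresponding VMCB clean bit so hardware reloads the ASID state
 * instead of using a cached copy.
 */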
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2546 | |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2547 | #define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE) |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2548 | static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2549 | { |
| 2550 | struct vmcb_control_area *control = &svm->vmcb->control; |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2551 | struct ghcb *ghcb = svm->sev_es.ghcb; |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2552 | u64 ghcb_scratch_beg, ghcb_scratch_end; |
| 2553 | u64 scratch_gpa_beg, scratch_gpa_end; |
| 2554 | void *scratch_va; |
| 2555 | |
| 2556 | scratch_gpa_beg = ghcb_get_sw_scratch(ghcb); |
| 2557 | if (!scratch_gpa_beg) { |
| 2558 | pr_err("vmgexit: scratch gpa not provided\n"); |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2559 | goto e_scratch; |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2560 | } |
| 2561 | |
| 2562 | scratch_gpa_end = scratch_gpa_beg + len; |
| 2563 | if (scratch_gpa_end < scratch_gpa_beg) { |
| 2564 | pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n", |
| 2565 | len, scratch_gpa_beg); |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2566 | goto e_scratch; |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2567 | } |
| 2568 | |
| 2569 | if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) { |
| 2570 | /* Scratch area begins within GHCB */ |
| 2571 | ghcb_scratch_beg = control->ghcb_gpa + |
| 2572 | offsetof(struct ghcb, shared_buffer); |
| 2573 | ghcb_scratch_end = control->ghcb_gpa + |
| 2574 | offsetof(struct ghcb, reserved_1); |
| 2575 | |
| 2576 | /* |
| 2577 | * If the scratch area begins within the GHCB, it must be |
| 2578 | * completely contained in the GHCB shared buffer area. |
| 2579 | */ |
| 2580 | if (scratch_gpa_beg < ghcb_scratch_beg || |
| 2581 | scratch_gpa_end > ghcb_scratch_end) { |
| 2582 | pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n", |
| 2583 | scratch_gpa_beg, scratch_gpa_end); |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2584 | goto e_scratch; |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2585 | } |
| 2586 | |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2587 | scratch_va = (void *)svm->sev_es.ghcb; |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2588 | scratch_va += (scratch_gpa_beg - control->ghcb_gpa); |
| 2589 | } else { |
| 2590 | /* |
| 2591 | * The guest memory must be read into a kernel buffer, so |
| 2592 | * limit the size.
| 2593 | */ |
| 2594 | if (len > GHCB_SCRATCH_AREA_LIMIT) { |
| 2595 | pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n", |
| 2596 | len, GHCB_SCRATCH_AREA_LIMIT); |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2597 | goto e_scratch; |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2598 | } |
Sean Christopherson | a655276 | 2021-11-09 22:23:50 +0000 | [diff] [blame] | 2599 | scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT); |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2600 | if (!scratch_va) |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2601 | goto e_scratch; |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2602 | |
| 2603 | if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) { |
| 2604 | /* Unable to copy scratch area from guest */ |
| 2605 | pr_err("vmgexit: kvm_read_guest for scratch area failed\n"); |
| 2606 | |
Sean Christopherson | a655276 | 2021-11-09 22:23:50 +0000 | [diff] [blame] | 2607 | kvfree(scratch_va); |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2608 | goto e_scratch; |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2609 | } |
| 2610 | |
| 2611 | /* |
| 2612 | * The scratch area is outside the GHCB. The operation will |
| 2613 | * dictate whether the buffer needs to be synced before running |
| 2614 | * the vCPU next time (i.e. a read was requested so the data |
| 2615 | * must be written back to the guest memory). |
| 2616 | */ |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2617 | svm->sev_es.ghcb_sa_sync = sync; |
| 2618 | svm->sev_es.ghcb_sa_free = true; |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2619 | } |
| 2620 | |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2621 | svm->sev_es.ghcb_sa = scratch_va; |
| 2622 | svm->sev_es.ghcb_sa_len = len; |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2623 | |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2624 | return true; |
| 2625 | |
| 2626 | e_scratch: |
| 2627 | ghcb_set_sw_exit_info_1(ghcb, 2); |
| 2628 | ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA); |
| 2629 | |
| 2630 | return false; |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2631 | } |
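/*
 * To summarize the two placements handled above: the scratch area either
 * lives inside the GHCB page itself, in which case it must fall entirely
 * within the shared_buffer field and is used in place via the existing
 * mapping, or it lives elsewhere in guest memory, in which case it is
 * bounced through a kernel buffer capped at GHCB_SCRATCH_AREA_LIMIT and,
 * for reads, written back to the guest when the GHCB is unmapped.
 */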
| 2632 | |
Tom Lendacky | d369466 | 2020-12-10 11:09:50 -0600 | [diff] [blame] | 2633 | static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask, |
| 2634 | unsigned int pos) |
| 2635 | { |
| 2636 | svm->vmcb->control.ghcb_gpa &= ~(mask << pos); |
| 2637 | svm->vmcb->control.ghcb_gpa |= (value & mask) << pos; |
| 2638 | } |
| 2639 | |
| 2640 | static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos) |
| 2641 | { |
| 2642 | return (svm->vmcb->control.ghcb_gpa >> pos) & mask; |
| 2643 | } |
| 2644 | |
Tom Lendacky | 1edc145 | 2020-12-10 11:09:49 -0600 | [diff] [blame] | 2645 | static void set_ghcb_msr(struct vcpu_svm *svm, u64 value) |
| 2646 | { |
| 2647 | svm->vmcb->control.ghcb_gpa = value; |
| 2648 | } |
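/*
 * The GHCB MSR protocol packs a request/response code into bits 11:0 of
 * the GHCB GPA MSR (GHCB_MSR_INFO_MASK) and operation-specific data into
 * the remaining bits. As a sketch, a guest CPUID request decomposes as:
 *
 *   bits 63:32 - CPUID function        (GHCB_MSR_CPUID_FUNC_POS)
 *   bits 31:30 - register 0-3, i.e.
 *                EAX/EBX/ECX/EDX       (GHCB_MSR_CPUID_REG_POS)
 *   bits 11:0  - GHCB_MSR_CPUID_REQ
 */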
| 2649 | |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2650 | static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm) |
| 2651 | { |
Tom Lendacky | 1edc145 | 2020-12-10 11:09:49 -0600 | [diff] [blame] | 2652 | struct vmcb_control_area *control = &svm->vmcb->control; |
Tom Lendacky | d369466 | 2020-12-10 11:09:50 -0600 | [diff] [blame] | 2653 | struct kvm_vcpu *vcpu = &svm->vcpu; |
Tom Lendacky | 1edc145 | 2020-12-10 11:09:49 -0600 | [diff] [blame] | 2654 | u64 ghcb_info; |
Tom Lendacky | d369466 | 2020-12-10 11:09:50 -0600 | [diff] [blame] | 2655 | int ret = 1; |
Tom Lendacky | 1edc145 | 2020-12-10 11:09:49 -0600 | [diff] [blame] | 2656 | |
| 2657 | ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK; |
| 2658 | |
Tom Lendacky | 59e38b5 | 2020-12-10 11:09:52 -0600 | [diff] [blame] | 2659 | trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id, |
| 2660 | control->ghcb_gpa); |
| 2661 | |
Tom Lendacky | 1edc145 | 2020-12-10 11:09:49 -0600 | [diff] [blame] | 2662 | switch (ghcb_info) { |
| 2663 | case GHCB_MSR_SEV_INFO_REQ: |
| 2664 | set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, |
| 2665 | GHCB_VERSION_MIN, |
| 2666 | sev_enc_bit)); |
| 2667 | break; |
Tom Lendacky | d369466 | 2020-12-10 11:09:50 -0600 | [diff] [blame] | 2668 | case GHCB_MSR_CPUID_REQ: { |
| 2669 | u64 cpuid_fn, cpuid_reg, cpuid_value; |
| 2670 | |
| 2671 | cpuid_fn = get_ghcb_msr_bits(svm, |
| 2672 | GHCB_MSR_CPUID_FUNC_MASK, |
| 2673 | GHCB_MSR_CPUID_FUNC_POS); |
| 2674 | |
| 2675 | /* Initialize the registers needed by the CPUID intercept */ |
| 2676 | vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn; |
| 2677 | vcpu->arch.regs[VCPU_REGS_RCX] = 0; |
| 2678 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2679 | ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID); |
Tom Lendacky | d369466 | 2020-12-10 11:09:50 -0600 | [diff] [blame] | 2680 | if (!ret) { |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2681 | /* Error, keep GHCB MSR value as-is */ |
Tom Lendacky | d369466 | 2020-12-10 11:09:50 -0600 | [diff] [blame] | 2682 | break; |
| 2683 | } |
| 2684 | |
| 2685 | cpuid_reg = get_ghcb_msr_bits(svm, |
| 2686 | GHCB_MSR_CPUID_REG_MASK, |
| 2687 | GHCB_MSR_CPUID_REG_POS); |
| 2688 | if (cpuid_reg == 0) |
| 2689 | cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX]; |
| 2690 | else if (cpuid_reg == 1) |
| 2691 | cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX]; |
| 2692 | else if (cpuid_reg == 2) |
| 2693 | cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX]; |
| 2694 | else |
| 2695 | cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX]; |
| 2696 | |
| 2697 | set_ghcb_msr_bits(svm, cpuid_value, |
| 2698 | GHCB_MSR_CPUID_VALUE_MASK, |
| 2699 | GHCB_MSR_CPUID_VALUE_POS); |
| 2700 | |
| 2701 | set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP, |
| 2702 | GHCB_MSR_INFO_MASK, |
| 2703 | GHCB_MSR_INFO_POS); |
| 2704 | break; |
| 2705 | } |
Tom Lendacky | e1d7111 | 2020-12-10 11:09:51 -0600 | [diff] [blame] | 2706 | case GHCB_MSR_TERM_REQ: { |
| 2707 | u64 reason_set, reason_code; |
| 2708 | |
| 2709 | reason_set = get_ghcb_msr_bits(svm, |
| 2710 | GHCB_MSR_TERM_REASON_SET_MASK, |
| 2711 | GHCB_MSR_TERM_REASON_SET_POS); |
| 2712 | reason_code = get_ghcb_msr_bits(svm, |
| 2713 | GHCB_MSR_TERM_REASON_MASK, |
| 2714 | GHCB_MSR_TERM_REASON_POS); |
| 2715 | pr_info("SEV-ES guest requested termination: %#llx:%#llx\n", |
| 2716 | reason_set, reason_code); |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2717 | |
| 2718 | ret = -EINVAL; |
| 2719 | break; |
Tom Lendacky | e1d7111 | 2020-12-10 11:09:51 -0600 | [diff] [blame] | 2720 | } |
Tom Lendacky | 1edc145 | 2020-12-10 11:09:49 -0600 | [diff] [blame] | 2721 | default: |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2722 | /* Error, keep GHCB MSR value as-is */ |
| 2723 | break; |
Tom Lendacky | 1edc145 | 2020-12-10 11:09:49 -0600 | [diff] [blame] | 2724 | } |
| 2725 | |
Tom Lendacky | 59e38b5 | 2020-12-10 11:09:52 -0600 | [diff] [blame] | 2726 | trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id, |
| 2727 | control->ghcb_gpa, ret); |
| 2728 | |
Tom Lendacky | d369466 | 2020-12-10 11:09:50 -0600 | [diff] [blame] | 2729 | return ret; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2730 | } |
| 2731 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2732 | int sev_handle_vmgexit(struct kvm_vcpu *vcpu) |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2733 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2734 | struct vcpu_svm *svm = to_svm(vcpu); |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2735 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 2736 | u64 ghcb_gpa, exit_code; |
| 2737 | struct ghcb *ghcb; |
| 2738 | int ret; |
| 2739 | |
| 2740 | /* Validate the GHCB */ |
| 2741 | ghcb_gpa = control->ghcb_gpa; |
| 2742 | if (ghcb_gpa & GHCB_MSR_INFO_MASK) |
| 2743 | return sev_handle_vmgexit_msr_protocol(svm); |
| 2744 | |
| 2745 | if (!ghcb_gpa) { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2746 | vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n"); |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2747 | |
| 2748 | /* Without a GHCB, just return right back to the guest */ |
| 2749 | return 1; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2750 | } |
| 2751 | |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2752 | if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2753 | /* Unable to map GHCB from guest */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2754 | vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n", |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2755 | ghcb_gpa); |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2756 | |
| 2757 | /* Without a GHCB, just return right back to the guest */ |
| 2758 | return 1; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2759 | } |
| 2760 | |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2761 | svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; |
| 2762 | ghcb = svm->sev_es.ghcb_map.hva; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2763 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2764 | trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb); |
Tom Lendacky | d523ab6b | 2020-12-10 11:09:48 -0600 | [diff] [blame] | 2765 | |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2766 | exit_code = ghcb_get_sw_exit_code(ghcb); |
| 2767 | |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2768 | if (!sev_es_validate_vmgexit(svm)) |
| 2769 | return 1; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2770 | |
| 2771 | sev_es_sync_from_ghcb(svm); |
| 2772 | ghcb_set_sw_exit_info_1(ghcb, 0); |
| 2773 | ghcb_set_sw_exit_info_2(ghcb, 0); |
| 2774 | |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2775 | ret = 1; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2776 | switch (exit_code) { |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2777 | case SVM_VMGEXIT_MMIO_READ: |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2778 | if (!setup_vmgexit_scratch(svm, true, control->exit_info_2)) |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2779 | break; |
| 2780 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2781 | ret = kvm_sev_es_mmio_read(vcpu, |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2782 | control->exit_info_1, |
| 2783 | control->exit_info_2, |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2784 | svm->sev_es.ghcb_sa); |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2785 | break; |
| 2786 | case SVM_VMGEXIT_MMIO_WRITE: |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2787 | if (!setup_vmgexit_scratch(svm, false, control->exit_info_2)) |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2788 | break; |
| 2789 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2790 | ret = kvm_sev_es_mmio_write(vcpu, |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2791 | control->exit_info_1, |
| 2792 | control->exit_info_2, |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2793 | svm->sev_es.ghcb_sa); |
Tom Lendacky | 8f423a8 | 2020-12-10 11:09:53 -0600 | [diff] [blame] | 2794 | break; |
Tom Lendacky | 4444dfe | 2020-12-14 11:16:03 -0500 | [diff] [blame] | 2795 | case SVM_VMGEXIT_NMI_COMPLETE: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2796 | ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET); |
Tom Lendacky | 4444dfe | 2020-12-14 11:16:03 -0500 | [diff] [blame] | 2797 | break; |
Tom Lendacky | 647daca | 2021-01-04 14:20:01 -0600 | [diff] [blame] | 2798 | case SVM_VMGEXIT_AP_HLT_LOOP: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2799 | ret = kvm_emulate_ap_reset_hold(vcpu); |
Tom Lendacky | 647daca | 2021-01-04 14:20:01 -0600 | [diff] [blame] | 2800 | break; |
Tom Lendacky | 8640ca5 | 2020-12-15 12:44:07 -0500 | [diff] [blame] | 2801 | case SVM_VMGEXIT_AP_JUMP_TABLE: { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2802 | struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info; |
Tom Lendacky | 8640ca5 | 2020-12-15 12:44:07 -0500 | [diff] [blame] | 2803 | |
| 2804 | switch (control->exit_info_1) { |
| 2805 | case 0: |
| 2806 | /* Set AP jump table address */ |
| 2807 | sev->ap_jump_table = control->exit_info_2; |
| 2808 | break; |
| 2809 | case 1: |
| 2810 | /* Get AP jump table address */ |
| 2811 | ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table); |
| 2812 | break; |
| 2813 | default: |
| 2814 | pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n", |
| 2815 | control->exit_info_1); |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2816 | ghcb_set_sw_exit_info_1(ghcb, 2); |
| 2817 | ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT); |
Tom Lendacky | 8640ca5 | 2020-12-15 12:44:07 -0500 | [diff] [blame] | 2818 | } |
| 2819 | |
Tom Lendacky | 8640ca5 | 2020-12-15 12:44:07 -0500 | [diff] [blame] | 2820 | break; |
| 2821 | } |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2822 | case SVM_VMGEXIT_UNSUPPORTED_EVENT: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2823 | vcpu_unimpl(vcpu, |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2824 | "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n", |
| 2825 | control->exit_info_1, control->exit_info_2); |
Sean Christopherson | 75236f5 | 2021-11-09 22:23:49 +0000 | [diff] [blame] | 2826 | ret = -EINVAL; |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2827 | break; |
| 2828 | default: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2829 | ret = svm_invoke_exit_handler(vcpu, exit_code); |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 2830 | } |
| 2831 | |
| 2832 | return ret; |
| 2833 | } |
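/*
 * As with other KVM exit handlers, the return value above follows the
 * usual convention: a positive value resumes the guest, zero exits to
 * userspace, and a negative value is an error.
 */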
Tom Lendacky | 7ed9abf | 2020-12-10 11:09:54 -0600 | [diff] [blame] | 2834 | |
| 2835 | int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in) |
| 2836 | { |
Paolo Bonzini | 9b0971c | 2021-10-25 12:14:31 -0400 | [diff] [blame] | 2837 | int count; |
| 2838 | int bytes; |
| 2839 | |
| 2840 | if (svm->vmcb->control.exit_info_2 > INT_MAX) |
Tom Lendacky | 7ed9abf | 2020-12-10 11:09:54 -0600 | [diff] [blame] | 2841 | return -EINVAL; |
| 2842 | |
Paolo Bonzini | 9b0971c | 2021-10-25 12:14:31 -0400 | [diff] [blame] | 2843 | count = svm->vmcb->control.exit_info_2; |
| 2844 | if (unlikely(check_mul_overflow(count, size, &bytes))) |
| 2845 | return -EINVAL; |
| 2846 | |
Tom Lendacky | ad5b353 | 2021-12-02 12:52:05 -0600 | [diff] [blame] | 2847 | if (!setup_vmgexit_scratch(svm, in, bytes)) |
| 2848 | return 1; |
Paolo Bonzini | 9b0971c | 2021-10-25 12:14:31 -0400 | [diff] [blame] | 2849 | |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2850 | return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa, |
Paolo Bonzini | 1f05833 | 2021-11-11 10:52:26 -0500 | [diff] [blame] | 2851 | count, in); |
Tom Lendacky | 7ed9abf | 2020-12-10 11:09:54 -0600 | [diff] [blame] | 2852 | } |
Tom Lendacky | 376c6d2 | 2020-12-10 11:10:06 -0600 | [diff] [blame] | 2853 | |
| 2854 | void sev_es_init_vmcb(struct vcpu_svm *svm) |
| 2855 | { |
| 2856 | struct kvm_vcpu *vcpu = &svm->vcpu; |
| 2857 | |
| 2858 | svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE; |
| 2859 | svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; |
| 2860 | |
| 2861 | /* |
| 2862 | * An SEV-ES guest requires a VMSA area that is separate from the
| 2863 | * VMCB page. Do not include the encryption mask on the VMSA physical |
| 2864 | * address since hardware will access it using the guest key. |
| 2865 | */ |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2866 | svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa); |
Tom Lendacky | 376c6d2 | 2020-12-10 11:10:06 -0600 | [diff] [blame] | 2867 | |
| 2868 | /* Can't intercept CR register access, HV can't modify CR registers */ |
| 2869 | svm_clr_intercept(svm, INTERCEPT_CR0_READ); |
| 2870 | svm_clr_intercept(svm, INTERCEPT_CR4_READ); |
| 2871 | svm_clr_intercept(svm, INTERCEPT_CR8_READ); |
| 2872 | svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); |
| 2873 | svm_clr_intercept(svm, INTERCEPT_CR4_WRITE); |
| 2874 | svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); |
| 2875 | |
| 2876 | svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0); |
| 2877 | |
| 2878 | /* Track EFER/CR register changes */ |
| 2879 | svm_set_intercept(svm, TRAP_EFER_WRITE); |
| 2880 | svm_set_intercept(svm, TRAP_CR0_WRITE); |
| 2881 | svm_set_intercept(svm, TRAP_CR4_WRITE); |
| 2882 | svm_set_intercept(svm, TRAP_CR8_WRITE); |
| 2883 | |
| 2884 | /* No support for enable_vmware_backdoor */ |
| 2885 | clr_exception_intercept(svm, GP_VECTOR); |
| 2886 | |
| 2887 | /* Can't intercept XSETBV, HV can't modify XCR0 directly */ |
| 2888 | svm_clr_intercept(svm, INTERCEPT_XSETBV); |
| 2889 | |
| 2890 | /* Clear intercepts on selected MSRs */ |
| 2891 | set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1); |
| 2892 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1); |
| 2893 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); |
| 2894 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); |
| 2895 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); |
| 2896 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); |
| 2897 | } |
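/*
 * Note the pattern above: because SEV-ES guest state is encrypted, the
 * classic CR read/write intercepts (which depend on KVM emulating the
 * access) are useless, so they are replaced with the SEV-ES register
 * write traps (TRAP_EFER_WRITE etc.), which merely notify KVM after
 * hardware has already completed the write.
 */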
| 2898 | |
Sean Christopherson | 9ebe530 | 2021-09-20 17:03:02 -0700 | [diff] [blame] | 2899 | void sev_es_vcpu_reset(struct vcpu_svm *svm) |
Tom Lendacky | 376c6d2 | 2020-12-10 11:10:06 -0600 | [diff] [blame] | 2900 | { |
| 2901 | /* |
Sean Christopherson | 9ebe530 | 2021-09-20 17:03:02 -0700 | [diff] [blame] | 2902 | * Set the GHCB MSR value as per the GHCB specification when emulating |
| 2903 | * vCPU RESET for an SEV-ES guest. |
Tom Lendacky | 376c6d2 | 2020-12-10 11:10:06 -0600 | [diff] [blame] | 2904 | */ |
| 2905 | set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, |
| 2906 | GHCB_VERSION_MIN, |
| 2907 | sev_enc_bit)); |
| 2908 | } |
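/*
 * A sketch of the GHCB_MSR_SEV_INFO() encoding, per the GHCB
 * specification's SEV information response:
 *
 *   bits 63:48 - maximum GHCB protocol version supported
 *   bits 47:32 - minimum GHCB protocol version supported
 *   bits 31:24 - the C-bit position (sev_enc_bit, from CPUID 0x8000001f)
 *   bits 11:0  - the SEV information response code
 */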
Tom Lendacky | 8613777 | 2020-12-10 11:10:07 -0600 | [diff] [blame] | 2909 | |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 2910 | void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu) |
Tom Lendacky | 8613777 | 2020-12-10 11:10:07 -0600 | [diff] [blame] | 2911 | { |
| 2912 | struct svm_cpu_data *sd = per_cpu(svm_data, cpu); |
| 2913 | struct vmcb_save_area *hostsa; |
Tom Lendacky | 8613777 | 2020-12-10 11:10:07 -0600 | [diff] [blame] | 2914 | |
| 2915 | /* |
| 2916 | * For an SEV-ES guest, hardware restores the host state on VMEXIT, one
| 2917 | * step of which is to perform a VMLOAD. Since hardware does not
| 2918 | * perform a VMSAVE on VMRUN, the host save area must be updated.
| 2919 | */ |
Sean Christopherson | 35a7831 | 2020-12-30 16:27:00 -0800 | [diff] [blame] | 2920 | vmsave(__sme_page_pa(sd->save_area)); |
Tom Lendacky | 8613777 | 2020-12-10 11:10:07 -0600 | [diff] [blame] | 2921 | |
Tom Lendacky | 8613777 | 2020-12-10 11:10:07 -0600 | [diff] [blame] | 2922 | /* XCR0 is restored on VMEXIT, save the current host value */ |
| 2923 | hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400); |
| 2924 | hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); |
| 2925 | |
Ingo Molnar | d9f6e12 | 2021-03-18 15:28:01 +0100 | [diff] [blame] | 2926 | /* PKRU is restored on VMEXIT, save the current host value */ |
Tom Lendacky | 8613777 | 2020-12-10 11:10:07 -0600 | [diff] [blame] | 2927 | hostsa->pkru = read_pkru(); |
| 2928 | |
| 2929 | /* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
| 2930 | hostsa->xss = host_xss; |
| 2931 | } |
| 2932 | |
Tom Lendacky | 647daca | 2021-01-04 14:20:01 -0600 | [diff] [blame] | 2933 | void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) |
| 2934 | { |
| 2935 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2936 | |
| 2937 | /* First SIPI: Use the values as initially set by the VMM */ |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2938 | if (!svm->sev_es.received_first_sipi) { |
| 2939 | svm->sev_es.received_first_sipi = true; |
Tom Lendacky | 647daca | 2021-01-04 14:20:01 -0600 | [diff] [blame] | 2940 | return; |
| 2941 | } |
| 2942 | |
| 2943 | /* |
| 2944 | * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where |
| 2945 | * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a |
| 2946 | * non-zero value. |
| 2947 | */ |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2948 | if (!svm->sev_es.ghcb) |
Tom Lendacky | a3ba26e | 2021-04-09 09:38:42 -0500 | [diff] [blame] | 2949 | return; |
| 2950 | |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2951 | ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1); |
Tom Lendacky | 647daca | 2021-01-04 14:20:01 -0600 | [diff] [blame] | 2952 | } |