// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/misc_cgroup.h>
#include <linux/processor.h>
#include <linux/trace_events.h>

#include <asm/pkru.h>
#include <asm/trapnr.h>
#include <asm/fpu/xcr.h>

#include "x86.h"
#include "svm.h"
#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"

#ifndef CONFIG_KVM_AMD_SEV
/*
 * When this config is not defined, the SEV feature is not supported and the
 * APIs in this file are not used, but this file still gets compiled into the
 * KVM AMD module.
 *
 * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum
 * misc_res_type {} defined in linux/misc_cgroup.h.
 *
 * The macros below allow compilation to succeed.
 */
#define MISC_CG_RES_SEV		MISC_CG_RES_TYPES
#define MISC_CG_RES_SEV_ES	MISC_CG_RES_TYPES
#endif

#ifdef CONFIG_KVM_AMD_SEV
/* enable/disable SEV support */
static bool sev_enabled = true;
module_param_named(sev, sev_enabled, bool, 0444);

/* enable/disable SEV-ES support */
static bool sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);
#else
#define sev_enabled false
#define sev_es_enabled false
#endif /* CONFIG_KVM_AMD_SEV */

static u8 sev_enc_bit;
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long sev_me_mask;
static unsigned int nr_asids;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;

struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

Sean Christopherson469bb322021-04-21 19:11:25 -070077/* Called with the sev_bitmap_lock held, or on shutdown */
78static int sev_flush_asids(int min_asid, int max_asid)
Joerg Roedeleaf78262020-03-24 10:41:54 +010079{
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -070080 int ret, asid, error = 0;
Sean Christopherson469bb322021-04-21 19:11:25 -070081
82 /* Check if there are any ASIDs to reclaim before performing a flush */
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -070083 asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
84 if (asid > max_asid)
Sean Christopherson469bb322021-04-21 19:11:25 -070085 return -EBUSY;
Joerg Roedeleaf78262020-03-24 10:41:54 +010086
87 /*
88 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
89 * so it must be guarded.
90 */
91 down_write(&sev_deactivate_lock);
92
93 wbinvd_on_all_cpus();
94 ret = sev_guest_df_flush(&error);
95
96 up_write(&sev_deactivate_lock);
97
98 if (ret)
99 pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);
100
101 return ret;
102}
103
Nathan Tempelman54526d12021-04-08 22:32:14 +0000104static inline bool is_mirroring_enc_context(struct kvm *kvm)
105{
106 return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
107}
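
/*
 * A VM is a "mirror" when it reuses another VM's SEV encryption context
 * instead of creating its own; enc_context_owner then points back at the
 * owning VM. (The owner is set by the copy-enc-context path, the
 * KVM_CAP_VM_COPY_ENC_CONTEXT_FROM capability, which lives outside this
 * excerpt.)
 */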
108
Joerg Roedeleaf78262020-03-24 10:41:54 +0100109/* Must be called with the sev_bitmap_lock held */
Tom Lendacky80675b32020-12-10 11:10:05 -0600110static bool __sev_recycle_asids(int min_asid, int max_asid)
Joerg Roedeleaf78262020-03-24 10:41:54 +0100111{
Sean Christopherson469bb322021-04-21 19:11:25 -0700112 if (sev_flush_asids(min_asid, max_asid))
Joerg Roedeleaf78262020-03-24 10:41:54 +0100113 return false;
114
Tom Lendacky80675b32020-12-10 11:10:05 -0600115 /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
Joerg Roedeleaf78262020-03-24 10:41:54 +0100116 bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -0700117 nr_asids);
118 bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100119
120 return true;
121}
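
/*
 * Note on the bitmap_xor() above: a reclaimable ASID is always still marked
 * in sev_asid_bitmap (the bit is set at allocation and the ASID is only added
 * to sev_reclaim_asid_bitmap on free), so XOR-ing the two bitmaps simply
 * clears the reclaimed bits from the allocation bitmap, e.g.
 *
 *	sev_asid_bitmap         0b1011
 *	sev_reclaim_asid_bitmap 0b0010
 *	result                  0b1001
 */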
122
Paolo Bonzini91b692a2021-11-11 10:02:26 -0500123static int sev_misc_cg_try_charge(struct kvm_sev_info *sev)
124{
125 enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
126 return misc_cg_try_charge(type, sev->misc_cg, 1);
127}
128
129static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
130{
131 enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
132 misc_cg_uncharge(type, sev->misc_cg, 1);
133}
134
Tom Lendacky80675b32020-12-10 11:10:05 -0600135static int sev_asid_new(struct kvm_sev_info *sev)
Joerg Roedeleaf78262020-03-24 10:41:54 +0100136{
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -0700137 int asid, min_asid, max_asid, ret;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100138 bool retry = true;
Vipin Sharma7aef27f2021-03-29 21:42:06 -0700139
Vipin Sharma7aef27f2021-03-29 21:42:06 -0700140 WARN_ON(sev->misc_cg);
141 sev->misc_cg = get_current_misc_cg();
Paolo Bonzini91b692a2021-11-11 10:02:26 -0500142 ret = sev_misc_cg_try_charge(sev);
Vipin Sharma7aef27f2021-03-29 21:42:06 -0700143 if (ret) {
144 put_misc_cg(sev->misc_cg);
145 sev->misc_cg = NULL;
146 return ret;
147 }
Joerg Roedeleaf78262020-03-24 10:41:54 +0100148
149 mutex_lock(&sev_bitmap_lock);
150
151 /*
	 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
	 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
Joerg Roedeleaf78262020-03-24 10:41:54 +0100154 */
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -0700155 min_asid = sev->es_active ? 1 : min_sev_asid;
Tom Lendacky80675b32020-12-10 11:10:05 -0600156 max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100157again:
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -0700158 asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
159 if (asid > max_asid) {
Tom Lendacky80675b32020-12-10 11:10:05 -0600160 if (retry && __sev_recycle_asids(min_asid, max_asid)) {
Joerg Roedeleaf78262020-03-24 10:41:54 +0100161 retry = false;
162 goto again;
163 }
164 mutex_unlock(&sev_bitmap_lock);
Vipin Sharma7aef27f2021-03-29 21:42:06 -0700165 ret = -EBUSY;
166 goto e_uncharge;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100167 }
168
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -0700169 __set_bit(asid, sev_asid_bitmap);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100170
171 mutex_unlock(&sev_bitmap_lock);
172
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -0700173 return asid;
Vipin Sharma7aef27f2021-03-29 21:42:06 -0700174e_uncharge:
Paolo Bonzini91b692a2021-11-11 10:02:26 -0500175 sev_misc_cg_uncharge(sev);
Vipin Sharma7aef27f2021-03-29 21:42:06 -0700176 put_misc_cg(sev->misc_cg);
177 sev->misc_cg = NULL;
178 return ret;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100179}
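
/*
 * Illustration of the ASID split above (the real bounds are discovered from
 * the hardware at setup time, so these numbers are hypothetical): with
 * min_sev_asid == 100 and max_sev_asid == 509, an SEV-ES guest is assigned an
 * ASID in [1, 99] while a plain SEV guest gets one in [100, 509].
 */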
180
181static int sev_get_asid(struct kvm *kvm)
182{
183 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
184
185 return sev->asid;
186}
187
Vipin Sharma7aef27f2021-03-29 21:42:06 -0700188static void sev_asid_free(struct kvm_sev_info *sev)
Joerg Roedeleaf78262020-03-24 10:41:54 +0100189{
190 struct svm_cpu_data *sd;
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -0700191 int cpu;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100192
193 mutex_lock(&sev_bitmap_lock);
194
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -0700195 __set_bit(sev->asid, sev_reclaim_asid_bitmap);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100196
197 for_each_possible_cpu(cpu) {
198 sd = per_cpu(svm_data, cpu);
Sean Christopherson179c6c22021-08-03 09:27:46 -0700199 sd->sev_vmcbs[sev->asid] = NULL;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100200 }
201
202 mutex_unlock(&sev_bitmap_lock);
Vipin Sharma7aef27f2021-03-29 21:42:06 -0700203
Paolo Bonzini91b692a2021-11-11 10:02:26 -0500204 sev_misc_cg_uncharge(sev);
Vipin Sharma7aef27f2021-03-29 21:42:06 -0700205 put_misc_cg(sev->misc_cg);
206 sev->misc_cg = NULL;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100207}
208
Alper Gun934002c2021-06-10 17:46:04 +0000209static void sev_decommission(unsigned int handle)
Joerg Roedeleaf78262020-03-24 10:41:54 +0100210{
Sean Christopherson238eca82021-04-06 15:49:52 -0700211 struct sev_data_decommission decommission;
Alper Gun934002c2021-06-10 17:46:04 +0000212
213 if (!handle)
214 return;
215
216 decommission.handle = handle;
217 sev_guest_decommission(&decommission, NULL);
218}
219
220static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
221{
Sean Christopherson238eca82021-04-06 15:49:52 -0700222 struct sev_data_deactivate deactivate;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100223
224 if (!handle)
225 return;
226
Sean Christopherson238eca82021-04-06 15:49:52 -0700227 deactivate.handle = handle;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100228
229 /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
230 down_read(&sev_deactivate_lock);
Sean Christopherson238eca82021-04-06 15:49:52 -0700231 sev_guest_deactivate(&deactivate, NULL);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100232 up_read(&sev_deactivate_lock);
233
Alper Gun934002c2021-06-10 17:46:04 +0000234 sev_decommission(handle);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100235}
236
237static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
238{
239 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
240 int asid, ret;
241
Sean Christopherson87279062021-03-30 20:19:36 -0700242 if (kvm->created_vcpus)
243 return -EINVAL;
244
Joerg Roedeleaf78262020-03-24 10:41:54 +0100245 ret = -EBUSY;
246 if (unlikely(sev->active))
247 return ret;
248
Sean Christophersona41fb262021-11-09 21:50:58 +0000249 sev->active = true;
250 sev->es_active = argp->id == KVM_SEV_ES_INIT;
Tom Lendacky80675b32020-12-10 11:10:05 -0600251 asid = sev_asid_new(sev);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100252 if (asid < 0)
Paolo Bonzinifd49e8e2021-04-22 02:39:48 -0400253 goto e_no_asid;
Vipin Sharma7aef27f2021-03-29 21:42:06 -0700254 sev->asid = asid;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100255
256 ret = sev_platform_init(&argp->error);
257 if (ret)
258 goto e_free;
259
Joerg Roedeleaf78262020-03-24 10:41:54 +0100260 INIT_LIST_HEAD(&sev->regions_list);
261
262 return 0;
263
264e_free:
Vipin Sharma7aef27f2021-03-29 21:42:06 -0700265 sev_asid_free(sev);
266 sev->asid = 0;
Paolo Bonzinifd49e8e2021-04-22 02:39:48 -0400267e_no_asid:
268 sev->es_active = false;
Sean Christophersona41fb262021-11-09 21:50:58 +0000269 sev->active = false;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100270 return ret;
271}
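
/*
 * Hypothetical userspace sketch (not part of this file) of how
 * sev_guest_init() is reached: every command handled in this file arrives
 * via the KVM_MEMORY_ENCRYPT_OP ioctl on the VM fd, e.g.
 *
 *	struct kvm_sev_cmd cmd = {
 *		.id     = KVM_SEV_ES_INIT,	// or KVM_SEV_INIT
 *		.sev_fd = sev_fd,		// fd of /dev/sev
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 *
 * and, per the created_vcpus check above, must be issued before any vCPU is
 * created.
 */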
272
273static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
274{
Sean Christopherson238eca82021-04-06 15:49:52 -0700275 struct sev_data_activate activate;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100276 int asid = sev_get_asid(kvm);
277 int ret;
278
Joerg Roedeleaf78262020-03-24 10:41:54 +0100279 /* activate ASID on the given handle */
Sean Christopherson238eca82021-04-06 15:49:52 -0700280 activate.handle = handle;
281 activate.asid = asid;
282 ret = sev_guest_activate(&activate, error);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100283
284 return ret;
285}
286
287static int __sev_issue_cmd(int fd, int id, void *data, int *error)
288{
289 struct fd f;
290 int ret;
291
292 f = fdget(fd);
293 if (!f.file)
294 return -EBADF;
295
296 ret = sev_issue_cmd_external_user(f.file, id, data, error);
297
298 fdput(f);
299 return ret;
300}
301
302static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
303{
304 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
305
306 return __sev_issue_cmd(sev->fd, id, data, error);
307}
308
309static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
310{
311 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -0700312 struct sev_data_launch_start start;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100313 struct kvm_sev_launch_start params;
314 void *dh_blob, *session_blob;
315 int *error = &argp->error;
316 int ret;
317
318 if (!sev_guest(kvm))
319 return -ENOTTY;
320
321 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
322 return -EFAULT;
323
Sean Christopherson238eca82021-04-06 15:49:52 -0700324 memset(&start, 0, sizeof(start));
Joerg Roedeleaf78262020-03-24 10:41:54 +0100325
326 dh_blob = NULL;
327 if (params.dh_uaddr) {
328 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
Sean Christopherson238eca82021-04-06 15:49:52 -0700329 if (IS_ERR(dh_blob))
330 return PTR_ERR(dh_blob);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100331
Sean Christopherson238eca82021-04-06 15:49:52 -0700332 start.dh_cert_address = __sme_set(__pa(dh_blob));
333 start.dh_cert_len = params.dh_len;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100334 }
335
336 session_blob = NULL;
337 if (params.session_uaddr) {
338 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
339 if (IS_ERR(session_blob)) {
340 ret = PTR_ERR(session_blob);
341 goto e_free_dh;
342 }
343
Sean Christopherson238eca82021-04-06 15:49:52 -0700344 start.session_address = __sme_set(__pa(session_blob));
345 start.session_len = params.session_len;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100346 }
347
Sean Christopherson238eca82021-04-06 15:49:52 -0700348 start.handle = params.handle;
349 start.policy = params.policy;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100350
351 /* create memory encryption context */
Sean Christopherson238eca82021-04-06 15:49:52 -0700352 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100353 if (ret)
354 goto e_free_session;
355
356 /* Bind ASID to this guest */
Sean Christopherson238eca82021-04-06 15:49:52 -0700357 ret = sev_bind_asid(kvm, start.handle, error);
Alper Gun934002c2021-06-10 17:46:04 +0000358 if (ret) {
359 sev_decommission(start.handle);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100360 goto e_free_session;
Alper Gun934002c2021-06-10 17:46:04 +0000361 }
Joerg Roedeleaf78262020-03-24 10:41:54 +0100362
363 /* return handle to userspace */
Sean Christopherson238eca82021-04-06 15:49:52 -0700364 params.handle = start.handle;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100365 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
Sean Christopherson238eca82021-04-06 15:49:52 -0700366 sev_unbind_asid(kvm, start.handle);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100367 ret = -EFAULT;
368 goto e_free_session;
369 }
370
Sean Christopherson238eca82021-04-06 15:49:52 -0700371 sev->handle = start.handle;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100372 sev->fd = argp->sev_fd;
373
374e_free_session:
375 kfree(session_blob);
376e_free_dh:
377 kfree(dh_blob);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100378 return ret;
379}
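
/*
 * Userspace sketch for this command (field names taken from the
 * struct kvm_sev_launch_start accesses above; values are illustrative):
 *
 *	struct kvm_sev_launch_start params = {
 *		.policy        = policy_bits,
 *		.dh_uaddr      = (__u64)dh_cert,	// optional guest owner DH cert
 *		.dh_len        = dh_cert_len,
 *		.session_uaddr = (__u64)session,	// optional session blob
 *		.session_len   = session_len,
 *	};
 *
 * issued with cmd.id == KVM_SEV_LAUNCH_START and cmd.data pointing at params;
 * on success, params.handle is written back with the firmware handle that
 * sev->handle now tracks.
 */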
380
381static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
382 unsigned long ulen, unsigned long *n,
383 int write)
384{
385 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
John Hubbard78824fa2020-05-25 23:22:06 -0700386 unsigned long npages, size;
387 int npinned;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100388 unsigned long locked, lock_limit;
389 struct page **pages;
390 unsigned long first, last;
Dan Carpenterff2bd9f2020-07-14 17:23:51 +0300391 int ret;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100392
Peter Gonda19a23da2021-01-27 08:15:24 -0800393 lockdep_assert_held(&kvm->lock);
394
Joerg Roedeleaf78262020-03-24 10:41:54 +0100395 if (ulen == 0 || uaddr + ulen < uaddr)
Paolo Bonzinia8d908b2020-06-23 05:12:24 -0400396 return ERR_PTR(-EINVAL);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100397
398 /* Calculate number of pages. */
399 first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
400 last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
401 npages = (last - first + 1);
402
403 locked = sev->pages_locked + npages;
404 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
405 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
406 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
Paolo Bonzinia8d908b2020-06-23 05:12:24 -0400407 return ERR_PTR(-ENOMEM);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100408 }
409
John Hubbard78824fa2020-05-25 23:22:06 -0700410 if (WARN_ON_ONCE(npages > INT_MAX))
Paolo Bonzinia8d908b2020-06-23 05:12:24 -0400411 return ERR_PTR(-EINVAL);
John Hubbard78824fa2020-05-25 23:22:06 -0700412
Joerg Roedeleaf78262020-03-24 10:41:54 +0100413 /* Avoid using vmalloc for smaller buffers. */
414 size = npages * sizeof(struct page *);
415 if (size > PAGE_SIZE)
Christoph Hellwig88dca4c2020-06-01 21:51:40 -0700416 pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100417 else
418 pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
419
420 if (!pages)
Paolo Bonzinia8d908b2020-06-23 05:12:24 -0400421 return ERR_PTR(-ENOMEM);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100422
423 /* Pin the user virtual address. */
John Hubbarddc42c8a2020-05-25 23:22:07 -0700424 npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100425 if (npinned != npages) {
426 pr_err("SEV: Failure locking %lu pages.\n", npages);
Dan Carpenterff2bd9f2020-07-14 17:23:51 +0300427 ret = -ENOMEM;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100428 goto err;
429 }
430
431 *n = npages;
432 sev->pages_locked = locked;
433
434 return pages;
435
436err:
Dan Carpenterff2bd9f2020-07-14 17:23:51 +0300437 if (npinned > 0)
John Hubbarddc42c8a2020-05-25 23:22:07 -0700438 unpin_user_pages(pages, npinned);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100439
440 kvfree(pages);
Dan Carpenterff2bd9f2020-07-14 17:23:51 +0300441 return ERR_PTR(ret);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100442}
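
/*
 * Worked example of the page accounting above (numbers are illustrative):
 * for uaddr == 0x1234 and ulen == 0x2000, first == 0x1 and last == 0x3, so
 * npages == 3 even though ulen is only two pages long, because the range
 * straddles three page frames.
 */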
443
444static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
445 unsigned long npages)
446{
447 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
448
John Hubbarddc42c8a2020-05-25 23:22:07 -0700449 unpin_user_pages(pages, npages);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100450 kvfree(pages);
451 sev->pages_locked -= npages;
452}
453
454static void sev_clflush_pages(struct page *pages[], unsigned long npages)
455{
456 uint8_t *page_virtual;
457 unsigned long i;
458
Krish Sadhukhane1ebb2b2020-09-17 21:20:38 +0000459 if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
460 pages == NULL)
Joerg Roedeleaf78262020-03-24 10:41:54 +0100461 return;
462
463 for (i = 0; i < npages; i++) {
464 page_virtual = kmap_atomic(pages[i]);
465 clflush_cache_range(page_virtual, PAGE_SIZE);
466 kunmap_atomic(page_virtual);
467 }
468}
469
470static unsigned long get_num_contig_pages(unsigned long idx,
471 struct page **inpages, unsigned long npages)
472{
473 unsigned long paddr, next_paddr;
474 unsigned long i = idx + 1, pages = 1;
475
476 /* find the number of contiguous pages starting from idx */
477 paddr = __sme_page_pa(inpages[idx]);
478 while (i < npages) {
479 next_paddr = __sme_page_pa(inpages[i++]);
480 if ((paddr + PAGE_SIZE) == next_paddr) {
481 pages++;
482 paddr = next_paddr;
483 continue;
484 }
485 break;
486 }
487
488 return pages;
489}
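
/*
 * Example (with illustrative system physical addresses): if the pinned pages
 * sit at 0x1000, 0x2000, 0x3000 and 0x8000, get_num_contig_pages(0, ...)
 * returns 3 (the first three pages are physically contiguous) while
 * get_num_contig_pages(3, ...) returns 1.
 */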
490
491static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
492{
493 unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
494 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
495 struct kvm_sev_launch_update_data params;
Sean Christopherson238eca82021-04-06 15:49:52 -0700496 struct sev_data_launch_update_data data;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100497 struct page **inpages;
498 int ret;
499
500 if (!sev_guest(kvm))
501 return -ENOTTY;
502
503 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
504 return -EFAULT;
505
Joerg Roedeleaf78262020-03-24 10:41:54 +0100506 vaddr = params.uaddr;
507 size = params.len;
508 vaddr_end = vaddr + size;
509
510 /* Lock the user memory. */
511 inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
Sean Christopherson238eca82021-04-06 15:49:52 -0700512 if (IS_ERR(inpages))
513 return PTR_ERR(inpages);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100514
515 /*
Paolo Bonzini14e3dd82020-09-23 13:01:33 -0400516 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
517 * place; the cache may contain the data that was written unencrypted.
Joerg Roedeleaf78262020-03-24 10:41:54 +0100518 */
519 sev_clflush_pages(inpages, npages);
520
Sean Christopherson238eca82021-04-06 15:49:52 -0700521 data.reserved = 0;
522 data.handle = sev->handle;
523
Joerg Roedeleaf78262020-03-24 10:41:54 +0100524 for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
525 int offset, len;
526
527 /*
528 * If the user buffer is not page-aligned, calculate the offset
529 * within the page.
530 */
531 offset = vaddr & (PAGE_SIZE - 1);
532
533 /* Calculate the number of pages that can be encrypted in one go. */
534 pages = get_num_contig_pages(i, inpages, npages);
535
536 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
537
Sean Christopherson238eca82021-04-06 15:49:52 -0700538 data.len = len;
539 data.address = __sme_page_pa(inpages[i]) + offset;
540 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100541 if (ret)
542 goto e_unpin;
543
544 size -= len;
545 next_vaddr = vaddr + len;
546 }
547
548e_unpin:
549 /* content of memory is updated, mark pages dirty */
550 for (i = 0; i < npages; i++) {
551 set_page_dirty_lock(inpages[i]);
552 mark_page_accessed(inpages[i]);
553 }
554 /* unlock the user pages */
555 sev_unpin_memory(kvm, inpages, npages);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100556 return ret;
557}
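
/*
 * Userspace counterpart (sketch): struct kvm_sev_launch_update_data carries
 * just the user virtual address and length of the region to be encrypted in
 * place, e.g.
 *
 *	struct kvm_sev_launch_update_data params = {
 *		.uaddr = (__u64)guest_mem,
 *		.len   = guest_mem_len,
 *	};
 *
 * issued with cmd.id == KVM_SEV_LAUNCH_UPDATE_DATA (see the kvm_sev_cmd
 * plumbing sketched above after sev_guest_init()).
 */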
558
Tom Lendackyad731092020-12-10 11:10:09 -0600559static int sev_es_sync_vmsa(struct vcpu_svm *svm)
560{
561 struct vmcb_save_area *save = &svm->vmcb->save;
562
563 /* Check some debug related fields before encrypting the VMSA */
564 if (svm->vcpu.guest_debug || (save->dr7 & ~DR7_FIXED_1))
565 return -EINVAL;
566
	/* Sync registers */
568 save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
569 save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
570 save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
571 save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
572 save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
573 save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
574 save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
575 save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
Paolo Bonzinid45f89f2020-12-16 13:08:21 -0500576#ifdef CONFIG_X86_64
Tom Lendackyad731092020-12-10 11:10:09 -0600577 save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8];
578 save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9];
579 save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
580 save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
581 save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
582 save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
583 save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
584 save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
Paolo Bonzinid45f89f2020-12-16 13:08:21 -0500585#endif
Tom Lendackyad731092020-12-10 11:10:09 -0600586 save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];
587
588 /* Sync some non-GPR registers before encrypting */
589 save->xcr0 = svm->vcpu.arch.xcr0;
590 save->pkru = svm->vcpu.arch.pkru;
591 save->xss = svm->vcpu.arch.ia32_xss;
Sean Christophersond0f9f822021-07-13 09:33:10 -0700592 save->dr6 = svm->vcpu.arch.dr6;
Tom Lendackyad731092020-12-10 11:10:09 -0600593
594 /*
595 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
596 * the traditional VMSA that is part of the VMCB. Copy the
597 * traditional VMSA as it has been built so far (in prep
598 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
599 */
Peter Gondab67a4cc2021-10-21 10:42:59 -0700600 memcpy(svm->sev_es.vmsa, save, sizeof(*save));
Tom Lendackyad731092020-12-10 11:10:09 -0600601
602 return 0;
603}
604
Peter Gondabb18a672021-09-15 10:17:55 -0700605static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
606 int *error)
607{
608 struct sev_data_launch_update_vmsa vmsa;
609 struct vcpu_svm *svm = to_svm(vcpu);
610 int ret;
611
612 /* Perform some pre-encryption checks against the VMSA */
613 ret = sev_es_sync_vmsa(svm);
614 if (ret)
615 return ret;
616
617 /*
618 * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
	 * the VMSA memory content (i.e. it will write the same memory region
620 * with the guest's key), so invalidate it first.
621 */
Peter Gondab67a4cc2021-10-21 10:42:59 -0700622 clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
Peter Gondabb18a672021-09-15 10:17:55 -0700623
624 vmsa.reserved = 0;
625 vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
Peter Gondab67a4cc2021-10-21 10:42:59 -0700626 vmsa.address = __sme_pa(svm->sev_es.vmsa);
Peter Gondabb18a672021-09-15 10:17:55 -0700627 vmsa.len = PAGE_SIZE;
Peter Gondabaa1e5c2021-10-15 13:32:22 -0400628 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
629 if (ret)
630 return ret;
631
632 vcpu->arch.guest_state_protected = true;
633 return 0;
Peter Gondabb18a672021-09-15 10:17:55 -0700634}
635
Tom Lendackyad731092020-12-10 11:10:09 -0600636static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
637{
Sean Christophersonc36b16d2021-03-30 20:19:34 -0700638 struct kvm_vcpu *vcpu;
Tom Lendackyad731092020-12-10 11:10:09 -0600639 int i, ret;
640
641 if (!sev_es_guest(kvm))
642 return -ENOTTY;
643
Sean Christophersonc36b16d2021-03-30 20:19:34 -0700644 kvm_for_each_vcpu(i, vcpu, kvm) {
Peter Gondabb18a672021-09-15 10:17:55 -0700645 ret = mutex_lock_killable(&vcpu->mutex);
Tom Lendackyad731092020-12-10 11:10:09 -0600646 if (ret)
Sean Christopherson238eca82021-04-06 15:49:52 -0700647 return ret;
Tom Lendackyad731092020-12-10 11:10:09 -0600648
Peter Gondabb18a672021-09-15 10:17:55 -0700649 ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
Tom Lendackyad731092020-12-10 11:10:09 -0600650
Peter Gondabb18a672021-09-15 10:17:55 -0700651 mutex_unlock(&vcpu->mutex);
Tom Lendackyad731092020-12-10 11:10:09 -0600652 if (ret)
Sean Christopherson238eca82021-04-06 15:49:52 -0700653 return ret;
Tom Lendackyad731092020-12-10 11:10:09 -0600654 }
655
Sean Christopherson238eca82021-04-06 15:49:52 -0700656 return 0;
Tom Lendackyad731092020-12-10 11:10:09 -0600657}
658
Joerg Roedeleaf78262020-03-24 10:41:54 +0100659static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
660{
661 void __user *measure = (void __user *)(uintptr_t)argp->data;
662 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -0700663 struct sev_data_launch_measure data;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100664 struct kvm_sev_launch_measure params;
665 void __user *p = NULL;
666 void *blob = NULL;
667 int ret;
668
669 if (!sev_guest(kvm))
670 return -ENOTTY;
671
672 if (copy_from_user(&params, measure, sizeof(params)))
673 return -EFAULT;
674
Sean Christopherson238eca82021-04-06 15:49:52 -0700675 memset(&data, 0, sizeof(data));
Joerg Roedeleaf78262020-03-24 10:41:54 +0100676
677 /* User wants to query the blob length */
678 if (!params.len)
679 goto cmd;
680
681 p = (void __user *)(uintptr_t)params.uaddr;
682 if (p) {
Sean Christopherson238eca82021-04-06 15:49:52 -0700683 if (params.len > SEV_FW_BLOB_MAX_SIZE)
684 return -EINVAL;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100685
Sean Christophersoneba04b22021-03-30 19:30:25 -0700686 blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100687 if (!blob)
Sean Christopherson238eca82021-04-06 15:49:52 -0700688 return -ENOMEM;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100689
Sean Christopherson238eca82021-04-06 15:49:52 -0700690 data.address = __psp_pa(blob);
691 data.len = params.len;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100692 }
693
694cmd:
Sean Christopherson238eca82021-04-06 15:49:52 -0700695 data.handle = sev->handle;
696 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100697
	/*
	 * If we only queried the blob length, the FW responded with the
	 * required length, so copy it back to userspace even if the command
	 * failed.
	 */
701 if (!params.len)
702 goto done;
703
704 if (ret)
705 goto e_free_blob;
706
707 if (blob) {
708 if (copy_to_user(p, blob, params.len))
709 ret = -EFAULT;
710 }
711
712done:
Sean Christopherson238eca82021-04-06 15:49:52 -0700713 params.len = data.len;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100714 if (copy_to_user(measure, &params, sizeof(params)))
715 ret = -EFAULT;
716e_free_blob:
717 kfree(blob);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100718 return ret;
719}
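
/*
 * Typical usage is two KVM_SEV_LAUNCH_MEASURE calls: the first with
 * params.len == 0 to learn the required buffer size, which is written back
 * into params.len, and the second with a buffer of that size to fetch the
 * measurement itself.
 */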
720
721static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
722{
723 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -0700724 struct sev_data_launch_finish data;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100725
726 if (!sev_guest(kvm))
727 return -ENOTTY;
728
Sean Christopherson238eca82021-04-06 15:49:52 -0700729 data.handle = sev->handle;
730 return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100731}
732
733static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
734{
735 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
736 struct kvm_sev_guest_status params;
Sean Christopherson238eca82021-04-06 15:49:52 -0700737 struct sev_data_guest_status data;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100738 int ret;
739
740 if (!sev_guest(kvm))
741 return -ENOTTY;
742
Sean Christopherson238eca82021-04-06 15:49:52 -0700743 memset(&data, 0, sizeof(data));
Joerg Roedeleaf78262020-03-24 10:41:54 +0100744
Sean Christopherson238eca82021-04-06 15:49:52 -0700745 data.handle = sev->handle;
746 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100747 if (ret)
Sean Christopherson238eca82021-04-06 15:49:52 -0700748 return ret;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100749
Sean Christopherson238eca82021-04-06 15:49:52 -0700750 params.policy = data.policy;
751 params.state = data.state;
752 params.handle = data.handle;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100753
754 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
755 ret = -EFAULT;
Sean Christopherson238eca82021-04-06 15:49:52 -0700756
Joerg Roedeleaf78262020-03-24 10:41:54 +0100757 return ret;
758}
759
760static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
761 unsigned long dst, int size,
762 int *error, bool enc)
763{
764 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -0700765 struct sev_data_dbg data;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100766
Sean Christopherson238eca82021-04-06 15:49:52 -0700767 data.reserved = 0;
768 data.handle = sev->handle;
769 data.dst_addr = dst;
770 data.src_addr = src;
771 data.len = size;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100772
Sean Christopherson238eca82021-04-06 15:49:52 -0700773 return sev_issue_cmd(kvm,
774 enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
775 &data, error);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100776}
777
778static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
779 unsigned long dst_paddr, int sz, int *err)
780{
781 int offset;
782
783 /*
	 * It's safe to read more than we are asked; the caller should ensure
	 * that the destination has enough space.
786 */
Joerg Roedeleaf78262020-03-24 10:41:54 +0100787 offset = src_paddr & 15;
Ashish Kalra854c57f2020-11-10 22:42:05 +0000788 src_paddr = round_down(src_paddr, 16);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100789 sz = round_up(sz + offset, 16);
790
791 return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
792}
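
/*
 * Illustration of the alignment fixup above: for src_paddr == 0x100a and
 * sz == 0x20, offset == 0xa, src_paddr is rounded down to 0x1000 and sz is
 * rounded up to 0x30, so the firmware sees a 16-byte-aligned request that
 * still covers every originally requested byte.
 */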
793
794static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
Sean Christopherson368340a2021-05-06 16:15:42 -0700795 void __user *dst_uaddr,
Joerg Roedeleaf78262020-03-24 10:41:54 +0100796 unsigned long dst_paddr,
797 int size, int *err)
798{
799 struct page *tpage = NULL;
800 int ret, offset;
801
	/* if inputs are not 16-byte aligned then use an intermediate buffer */
803 if (!IS_ALIGNED(dst_paddr, 16) ||
804 !IS_ALIGNED(paddr, 16) ||
805 !IS_ALIGNED(size, 16)) {
806 tpage = (void *)alloc_page(GFP_KERNEL);
807 if (!tpage)
808 return -ENOMEM;
809
810 dst_paddr = __sme_page_pa(tpage);
811 }
812
813 ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
814 if (ret)
815 goto e_free;
816
817 if (tpage) {
818 offset = paddr & 15;
Sean Christopherson368340a2021-05-06 16:15:42 -0700819 if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
Joerg Roedeleaf78262020-03-24 10:41:54 +0100820 ret = -EFAULT;
821 }
822
823e_free:
824 if (tpage)
825 __free_page(tpage);
826
827 return ret;
828}
829
830static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
Sean Christopherson368340a2021-05-06 16:15:42 -0700831 void __user *vaddr,
Joerg Roedeleaf78262020-03-24 10:41:54 +0100832 unsigned long dst_paddr,
Sean Christopherson368340a2021-05-06 16:15:42 -0700833 void __user *dst_vaddr,
Joerg Roedeleaf78262020-03-24 10:41:54 +0100834 int size, int *error)
835{
836 struct page *src_tpage = NULL;
837 struct page *dst_tpage = NULL;
838 int ret, len = size;
839
840 /* If source buffer is not aligned then use an intermediate buffer */
Sean Christopherson368340a2021-05-06 16:15:42 -0700841 if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
Joerg Roedeleaf78262020-03-24 10:41:54 +0100842 src_tpage = alloc_page(GFP_KERNEL);
843 if (!src_tpage)
844 return -ENOMEM;
845
Sean Christopherson368340a2021-05-06 16:15:42 -0700846 if (copy_from_user(page_address(src_tpage), vaddr, size)) {
Joerg Roedeleaf78262020-03-24 10:41:54 +0100847 __free_page(src_tpage);
848 return -EFAULT;
849 }
850
851 paddr = __sme_page_pa(src_tpage);
852 }
853
854 /*
855 * If destination buffer or length is not aligned then do read-modify-write:
856 * - decrypt destination in an intermediate buffer
857 * - copy the source buffer in an intermediate buffer
858 * - use the intermediate buffer as source buffer
859 */
Sean Christopherson368340a2021-05-06 16:15:42 -0700860 if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
Joerg Roedeleaf78262020-03-24 10:41:54 +0100861 int dst_offset;
862
863 dst_tpage = alloc_page(GFP_KERNEL);
864 if (!dst_tpage) {
865 ret = -ENOMEM;
866 goto e_free;
867 }
868
869 ret = __sev_dbg_decrypt(kvm, dst_paddr,
870 __sme_page_pa(dst_tpage), size, error);
871 if (ret)
872 goto e_free;
873
874 /*
875 * If source is kernel buffer then use memcpy() otherwise
876 * copy_from_user().
877 */
878 dst_offset = dst_paddr & 15;
879
880 if (src_tpage)
881 memcpy(page_address(dst_tpage) + dst_offset,
882 page_address(src_tpage), size);
883 else {
884 if (copy_from_user(page_address(dst_tpage) + dst_offset,
Sean Christopherson368340a2021-05-06 16:15:42 -0700885 vaddr, size)) {
Joerg Roedeleaf78262020-03-24 10:41:54 +0100886 ret = -EFAULT;
887 goto e_free;
888 }
889 }
890
891 paddr = __sme_page_pa(dst_tpage);
892 dst_paddr = round_down(dst_paddr, 16);
893 len = round_up(size, 16);
894 }
895
896 ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
897
898e_free:
899 if (src_tpage)
900 __free_page(src_tpage);
901 if (dst_tpage)
902 __free_page(dst_tpage);
903 return ret;
904}
905
906static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
907{
908 unsigned long vaddr, vaddr_end, next_vaddr;
909 unsigned long dst_vaddr;
910 struct page **src_p, **dst_p;
911 struct kvm_sev_dbg debug;
912 unsigned long n;
913 unsigned int size;
914 int ret;
915
916 if (!sev_guest(kvm))
917 return -ENOTTY;
918
919 if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
920 return -EFAULT;
921
922 if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
923 return -EINVAL;
924 if (!debug.dst_uaddr)
925 return -EINVAL;
926
927 vaddr = debug.src_uaddr;
928 size = debug.len;
929 vaddr_end = vaddr + size;
930 dst_vaddr = debug.dst_uaddr;
931
932 for (; vaddr < vaddr_end; vaddr = next_vaddr) {
933 int len, s_off, d_off;
934
935 /* lock userspace source and destination page */
936 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
Dan Carpenterff2bd9f2020-07-14 17:23:51 +0300937 if (IS_ERR(src_p))
938 return PTR_ERR(src_p);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100939
940 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
Dan Carpenterff2bd9f2020-07-14 17:23:51 +0300941 if (IS_ERR(dst_p)) {
Joerg Roedeleaf78262020-03-24 10:41:54 +0100942 sev_unpin_memory(kvm, src_p, n);
Dan Carpenterff2bd9f2020-07-14 17:23:51 +0300943 return PTR_ERR(dst_p);
Joerg Roedeleaf78262020-03-24 10:41:54 +0100944 }
945
946 /*
Paolo Bonzini14e3dd82020-09-23 13:01:33 -0400947 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
948 * the pages; flush the destination too so that future accesses do not
949 * see stale data.
Joerg Roedeleaf78262020-03-24 10:41:54 +0100950 */
951 sev_clflush_pages(src_p, 1);
952 sev_clflush_pages(dst_p, 1);
953
954 /*
		 * Since the user buffer may not be page-aligned, calculate the
956 * offset within the page.
957 */
958 s_off = vaddr & ~PAGE_MASK;
959 d_off = dst_vaddr & ~PAGE_MASK;
960 len = min_t(size_t, (PAGE_SIZE - s_off), size);
961
962 if (dec)
963 ret = __sev_dbg_decrypt_user(kvm,
964 __sme_page_pa(src_p[0]) + s_off,
Sean Christopherson368340a2021-05-06 16:15:42 -0700965 (void __user *)dst_vaddr,
Joerg Roedeleaf78262020-03-24 10:41:54 +0100966 __sme_page_pa(dst_p[0]) + d_off,
967 len, &argp->error);
968 else
969 ret = __sev_dbg_encrypt_user(kvm,
970 __sme_page_pa(src_p[0]) + s_off,
Sean Christopherson368340a2021-05-06 16:15:42 -0700971 (void __user *)vaddr,
Joerg Roedeleaf78262020-03-24 10:41:54 +0100972 __sme_page_pa(dst_p[0]) + d_off,
Sean Christopherson368340a2021-05-06 16:15:42 -0700973 (void __user *)dst_vaddr,
Joerg Roedeleaf78262020-03-24 10:41:54 +0100974 len, &argp->error);
975
976 sev_unpin_memory(kvm, src_p, n);
977 sev_unpin_memory(kvm, dst_p, n);
978
979 if (ret)
980 goto err;
981
982 next_vaddr = vaddr + len;
983 dst_vaddr = dst_vaddr + len;
984 size -= len;
985 }
986err:
987 return ret;
988}
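
/*
 * Hypothetical userspace usage (field names from the struct kvm_sev_dbg
 * accesses above): to read guest memory in the clear, point src_uaddr at the
 * encrypted guest region and dst_uaddr at a plaintext buffer of the same
 * length, then issue cmd.id == KVM_SEV_DBG_DECRYPT; KVM_SEV_DBG_ENCRYPT is
 * the same operation in the opposite direction. The firmware rejects both
 * commands if the guest policy forbids debugging.
 */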
989
990static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
991{
992 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -0700993 struct sev_data_launch_secret data;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100994 struct kvm_sev_launch_secret params;
995 struct page **pages;
996 void *blob, *hdr;
Cfir Cohen50085be2020-08-07 17:37:46 -0700997 unsigned long n, i;
Joerg Roedeleaf78262020-03-24 10:41:54 +0100998 int ret, offset;
999
1000 if (!sev_guest(kvm))
1001 return -ENOTTY;
1002
1003 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
1004 return -EFAULT;
1005
1006 pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
Paolo Bonzinia8d908b2020-06-23 05:12:24 -04001007 if (IS_ERR(pages))
1008 return PTR_ERR(pages);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001009
1010 /*
Paolo Bonzini14e3dd82020-09-23 13:01:33 -04001011 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
1012 * place; the cache may contain the data that was written unencrypted.
Cfir Cohen50085be2020-08-07 17:37:46 -07001013 */
1014 sev_clflush_pages(pages, n);
1015
1016 /*
	 * The secret must be copied into a contiguous memory region, so verify
	 * that the userspace memory pages are contiguous before we issue the
	 * command.
1019 */
1020 if (get_num_contig_pages(0, pages, n) != n) {
1021 ret = -EINVAL;
1022 goto e_unpin_memory;
1023 }
1024
Sean Christopherson238eca82021-04-06 15:49:52 -07001025 memset(&data, 0, sizeof(data));
Joerg Roedeleaf78262020-03-24 10:41:54 +01001026
1027 offset = params.guest_uaddr & (PAGE_SIZE - 1);
Sean Christopherson238eca82021-04-06 15:49:52 -07001028 data.guest_address = __sme_page_pa(pages[0]) + offset;
1029 data.guest_len = params.guest_len;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001030
1031 blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1032 if (IS_ERR(blob)) {
1033 ret = PTR_ERR(blob);
Sean Christopherson238eca82021-04-06 15:49:52 -07001034 goto e_unpin_memory;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001035 }
1036
Sean Christopherson238eca82021-04-06 15:49:52 -07001037 data.trans_address = __psp_pa(blob);
1038 data.trans_len = params.trans_len;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001039
1040 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1041 if (IS_ERR(hdr)) {
1042 ret = PTR_ERR(hdr);
1043 goto e_free_blob;
1044 }
Sean Christopherson238eca82021-04-06 15:49:52 -07001045 data.hdr_address = __psp_pa(hdr);
1046 data.hdr_len = params.hdr_len;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001047
Sean Christopherson238eca82021-04-06 15:49:52 -07001048 data.handle = sev->handle;
1049 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001050
1051 kfree(hdr);
1052
1053e_free_blob:
1054 kfree(blob);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001055e_unpin_memory:
Cfir Cohen50085be2020-08-07 17:37:46 -07001056 /* content of memory is updated, mark pages dirty */
1057 for (i = 0; i < n; i++) {
1058 set_page_dirty_lock(pages[i]);
1059 mark_page_accessed(pages[i]);
1060 }
Joerg Roedeleaf78262020-03-24 10:41:54 +01001061 sev_unpin_memory(kvm, pages, n);
1062 return ret;
1063}
1064
Brijesh Singh2c07ded2021-01-04 09:17:49 -06001065static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
1066{
1067 void __user *report = (void __user *)(uintptr_t)argp->data;
1068 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -07001069 struct sev_data_attestation_report data;
Brijesh Singh2c07ded2021-01-04 09:17:49 -06001070 struct kvm_sev_attestation_report params;
1071 void __user *p;
1072 void *blob = NULL;
1073 int ret;
1074
1075 if (!sev_guest(kvm))
1076 return -ENOTTY;
1077
1078 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
1079 return -EFAULT;
1080
Sean Christopherson238eca82021-04-06 15:49:52 -07001081 memset(&data, 0, sizeof(data));
Brijesh Singh2c07ded2021-01-04 09:17:49 -06001082
1083 /* User wants to query the blob length */
1084 if (!params.len)
1085 goto cmd;
1086
1087 p = (void __user *)(uintptr_t)params.uaddr;
1088 if (p) {
Sean Christopherson238eca82021-04-06 15:49:52 -07001089 if (params.len > SEV_FW_BLOB_MAX_SIZE)
1090 return -EINVAL;
Brijesh Singh2c07ded2021-01-04 09:17:49 -06001091
Sean Christophersoneba04b22021-03-30 19:30:25 -07001092 blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
Brijesh Singh2c07ded2021-01-04 09:17:49 -06001093 if (!blob)
Sean Christopherson238eca82021-04-06 15:49:52 -07001094 return -ENOMEM;
Brijesh Singh2c07ded2021-01-04 09:17:49 -06001095
Sean Christopherson238eca82021-04-06 15:49:52 -07001096 data.address = __psp_pa(blob);
1097 data.len = params.len;
1098 memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
Brijesh Singh2c07ded2021-01-04 09:17:49 -06001099 }
1100cmd:
Sean Christopherson238eca82021-04-06 15:49:52 -07001101 data.handle = sev->handle;
1102 ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
	/*
	 * If we only queried the report length, the FW responded with the
	 * required length, so copy it back to userspace even if the command
	 * failed.
	 */
1106 if (!params.len)
1107 goto done;
1108
1109 if (ret)
1110 goto e_free_blob;
1111
1112 if (blob) {
1113 if (copy_to_user(p, blob, params.len))
1114 ret = -EFAULT;
1115 }
1116
1117done:
Sean Christopherson238eca82021-04-06 15:49:52 -07001118 params.len = data.len;
Brijesh Singh2c07ded2021-01-04 09:17:49 -06001119 if (copy_to_user(report, &params, sizeof(params)))
1120 ret = -EFAULT;
1121e_free_blob:
1122 kfree(blob);
Brijesh Singh2c07ded2021-01-04 09:17:49 -06001123 return ret;
1124}
1125
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001126/* Userspace wants to query session length. */
1127static int
1128__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
1129 struct kvm_sev_send_start *params)
1130{
1131 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -07001132 struct sev_data_send_start data;
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001133 int ret;
1134
Ashish Kalra4f13d472021-06-07 06:15:32 +00001135 memset(&data, 0, sizeof(data));
Sean Christopherson238eca82021-04-06 15:49:52 -07001136 data.handle = sev->handle;
1137 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001138
Sean Christopherson238eca82021-04-06 15:49:52 -07001139 params->session_len = data.session_len;
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001140 if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
1141 sizeof(struct kvm_sev_send_start)))
1142 ret = -EFAULT;
1143
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001144 return ret;
1145}
1146
1147static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
1148{
1149 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -07001150 struct sev_data_send_start data;
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001151 struct kvm_sev_send_start params;
1152 void *amd_certs, *session_data;
1153 void *pdh_cert, *plat_certs;
1154 int ret;
1155
1156 if (!sev_guest(kvm))
1157 return -ENOTTY;
1158
1159 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1160 sizeof(struct kvm_sev_send_start)))
1161 return -EFAULT;
1162
1163 /* if session_len is zero, userspace wants to query the session length */
1164 if (!params.session_len)
1165 return __sev_send_start_query_session_length(kvm, argp,
1166 &params);
1167
1168 /* some sanity checks */
1169 if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
1170 !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
1171 return -EINVAL;
1172
1173 /* allocate the memory to hold the session data blob */
1174 session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
1175 if (!session_data)
1176 return -ENOMEM;
1177
1178 /* copy the certificate blobs from userspace */
1179 pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
1180 params.pdh_cert_len);
1181 if (IS_ERR(pdh_cert)) {
1182 ret = PTR_ERR(pdh_cert);
1183 goto e_free_session;
1184 }
1185
1186 plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
1187 params.plat_certs_len);
1188 if (IS_ERR(plat_certs)) {
1189 ret = PTR_ERR(plat_certs);
1190 goto e_free_pdh;
1191 }
1192
1193 amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
1194 params.amd_certs_len);
1195 if (IS_ERR(amd_certs)) {
1196 ret = PTR_ERR(amd_certs);
1197 goto e_free_plat_cert;
1198 }
1199
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001200 /* populate the FW SEND_START field with system physical address */
Sean Christopherson238eca82021-04-06 15:49:52 -07001201 memset(&data, 0, sizeof(data));
1202 data.pdh_cert_address = __psp_pa(pdh_cert);
1203 data.pdh_cert_len = params.pdh_cert_len;
1204 data.plat_certs_address = __psp_pa(plat_certs);
1205 data.plat_certs_len = params.plat_certs_len;
1206 data.amd_certs_address = __psp_pa(amd_certs);
1207 data.amd_certs_len = params.amd_certs_len;
1208 data.session_address = __psp_pa(session_data);
1209 data.session_len = params.session_len;
1210 data.handle = sev->handle;
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001211
Sean Christopherson238eca82021-04-06 15:49:52 -07001212 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001213
1214 if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
1215 session_data, params.session_len)) {
1216 ret = -EFAULT;
Sean Christopherson238eca82021-04-06 15:49:52 -07001217 goto e_free_amd_cert;
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001218 }
1219
Sean Christopherson238eca82021-04-06 15:49:52 -07001220 params.policy = data.policy;
1221 params.session_len = data.session_len;
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001222 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
1223 sizeof(struct kvm_sev_send_start)))
1224 ret = -EFAULT;
1225
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001226e_free_amd_cert:
1227 kfree(amd_certs);
1228e_free_plat_cert:
1229 kfree(plat_certs);
1230e_free_pdh:
1231 kfree(pdh_cert);
1232e_free_session:
1233 kfree(session_data);
1234 return ret;
1235}
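
/*
 * For orientation: the send side of SEV live migration in this file is
 * SEND_START (above, establishes the outgoing session) -> SEND_UPDATE_DATA
 * (one call per exported chunk) -> SEND_FINISH, with SEND_CANCEL available
 * to abort; the receiving side mirrors this with RECEIVE_START and
 * RECEIVE_UPDATE_DATA further below.
 */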
1236
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001237/* Userspace wants to query either header or trans length. */
1238static int
1239__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
1240 struct kvm_sev_send_update_data *params)
1241{
1242 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -07001243 struct sev_data_send_update_data data;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001244 int ret;
1245
Ashish Kalra4f13d472021-06-07 06:15:32 +00001246 memset(&data, 0, sizeof(data));
Sean Christopherson238eca82021-04-06 15:49:52 -07001247 data.handle = sev->handle;
1248 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001249
Sean Christopherson238eca82021-04-06 15:49:52 -07001250 params->hdr_len = data.hdr_len;
1251 params->trans_len = data.trans_len;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001252
1253 if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
1254 sizeof(struct kvm_sev_send_update_data)))
1255 ret = -EFAULT;
1256
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001257 return ret;
1258}
1259
1260static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1261{
1262 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -07001263 struct sev_data_send_update_data data;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001264 struct kvm_sev_send_update_data params;
1265 void *hdr, *trans_data;
1266 struct page **guest_page;
1267 unsigned long n;
1268 int ret, offset;
1269
1270 if (!sev_guest(kvm))
1271 return -ENOTTY;
1272
1273 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1274 sizeof(struct kvm_sev_send_update_data)))
1275 return -EFAULT;
1276
1277 /* userspace wants to query either header or trans length */
1278 if (!params.trans_len || !params.hdr_len)
1279 return __sev_send_update_data_query_lengths(kvm, argp, &params);
1280
1281 if (!params.trans_uaddr || !params.guest_uaddr ||
1282 !params.guest_len || !params.hdr_uaddr)
1283 return -EINVAL;
1284
1285 /* Check if we are crossing the page boundary */
1286 offset = params.guest_uaddr & (PAGE_SIZE - 1);
1287 if ((params.guest_len + offset > PAGE_SIZE))
1288 return -EINVAL;
1289
1290 /* Pin guest memory */
1291 guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
1292 PAGE_SIZE, &n, 0);
Sean Christophersonc7a1b2b2021-05-06 10:58:26 -07001293 if (IS_ERR(guest_page))
1294 return PTR_ERR(guest_page);
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001295
1296 /* allocate memory for header and transport buffer */
1297 ret = -ENOMEM;
1298 hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
1299 if (!hdr)
1300 goto e_unpin;
1301
1302 trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
1303 if (!trans_data)
1304 goto e_free_hdr;
1305
Sean Christopherson238eca82021-04-06 15:49:52 -07001306 memset(&data, 0, sizeof(data));
1307 data.hdr_address = __psp_pa(hdr);
1308 data.hdr_len = params.hdr_len;
1309 data.trans_address = __psp_pa(trans_data);
1310 data.trans_len = params.trans_len;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001311
1312 /* The SEND_UPDATE_DATA command requires C-bit to be always set. */
Sean Christopherson238eca82021-04-06 15:49:52 -07001313 data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1314 data.guest_address |= sev_me_mask;
1315 data.guest_len = params.guest_len;
1316 data.handle = sev->handle;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001317
Sean Christopherson238eca82021-04-06 15:49:52 -07001318 ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001319
1320 if (ret)
Sean Christopherson238eca82021-04-06 15:49:52 -07001321 goto e_free_trans_data;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001322
1323 /* copy transport buffer to user space */
1324 if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
1325 trans_data, params.trans_len)) {
1326 ret = -EFAULT;
Sean Christopherson238eca82021-04-06 15:49:52 -07001327 goto e_free_trans_data;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001328 }
1329
1330 /* Copy packet header to userspace. */
Sean Christophersonb4a69392021-05-06 10:58:25 -07001331 if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
1332 params.hdr_len))
1333 ret = -EFAULT;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001334
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001335e_free_trans_data:
1336 kfree(trans_data);
1337e_free_hdr:
1338 kfree(hdr);
1339e_unpin:
1340 sev_unpin_memory(kvm, guest_page, n);
1341
1342 return ret;
1343}
1344
Brijesh Singhfddecf62021-04-15 15:54:15 +00001345static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1346{
1347 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -07001348 struct sev_data_send_finish data;
Brijesh Singhfddecf62021-04-15 15:54:15 +00001349
1350 if (!sev_guest(kvm))
1351 return -ENOTTY;
1352
Sean Christopherson238eca82021-04-06 15:49:52 -07001353 data.handle = sev->handle;
1354 return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
Brijesh Singhfddecf62021-04-15 15:54:15 +00001355}
1356
Steve Rutherford5569e2e2021-04-20 05:01:20 -04001357static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
1358{
1359 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -07001360 struct sev_data_send_cancel data;
Steve Rutherford5569e2e2021-04-20 05:01:20 -04001361
1362 if (!sev_guest(kvm))
1363 return -ENOTTY;
1364
Sean Christopherson238eca82021-04-06 15:49:52 -07001365 data.handle = sev->handle;
1366 return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
Steve Rutherford5569e2e2021-04-20 05:01:20 -04001367}
1368
Brijesh Singhaf43cbb2021-04-15 15:54:50 +00001369static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
1370{
1371 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -07001372 struct sev_data_receive_start start;
Brijesh Singhaf43cbb2021-04-15 15:54:50 +00001373 struct kvm_sev_receive_start params;
1374 int *error = &argp->error;
1375 void *session_data;
1376 void *pdh_data;
1377 int ret;
1378
1379 if (!sev_guest(kvm))
1380 return -ENOTTY;
1381
1382 /* Get parameter from the userspace */
1383 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1384 sizeof(struct kvm_sev_receive_start)))
1385 return -EFAULT;
1386
1387 /* some sanity checks */
1388 if (!params.pdh_uaddr || !params.pdh_len ||
1389 !params.session_uaddr || !params.session_len)
1390 return -EINVAL;
1391
1392 pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
1393 if (IS_ERR(pdh_data))
1394 return PTR_ERR(pdh_data);
1395
1396 session_data = psp_copy_user_blob(params.session_uaddr,
1397 params.session_len);
1398 if (IS_ERR(session_data)) {
1399 ret = PTR_ERR(session_data);
1400 goto e_free_pdh;
1401 }
1402
Sean Christopherson238eca82021-04-06 15:49:52 -07001403 memset(&start, 0, sizeof(start));
1404 start.handle = params.handle;
1405 start.policy = params.policy;
1406 start.pdh_cert_address = __psp_pa(pdh_data);
1407 start.pdh_cert_len = params.pdh_len;
1408 start.session_address = __psp_pa(session_data);
1409 start.session_len = params.session_len;
Brijesh Singhaf43cbb2021-04-15 15:54:50 +00001410
1411 /* create memory encryption context */
Sean Christopherson238eca82021-04-06 15:49:52 -07001412 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
Brijesh Singhaf43cbb2021-04-15 15:54:50 +00001413 error);
1414 if (ret)
Sean Christopherson238eca82021-04-06 15:49:52 -07001415 goto e_free_session;
Brijesh Singhaf43cbb2021-04-15 15:54:50 +00001416
1417 /* Bind ASID to this guest */
Sean Christopherson238eca82021-04-06 15:49:52 -07001418 ret = sev_bind_asid(kvm, start.handle, error);
Mingwei Zhangf1815e0a2021-09-12 18:18:15 +00001419 if (ret) {
1420 sev_decommission(start.handle);
Sean Christopherson238eca82021-04-06 15:49:52 -07001421 goto e_free_session;
Mingwei Zhangf1815e0a2021-09-12 18:18:15 +00001422 }
Brijesh Singhaf43cbb2021-04-15 15:54:50 +00001423
Sean Christopherson238eca82021-04-06 15:49:52 -07001424 params.handle = start.handle;
Brijesh Singhaf43cbb2021-04-15 15:54:50 +00001425 if (copy_to_user((void __user *)(uintptr_t)argp->data,
1426 &params, sizeof(struct kvm_sev_receive_start))) {
1427 ret = -EFAULT;
Sean Christopherson238eca82021-04-06 15:49:52 -07001428 sev_unbind_asid(kvm, start.handle);
1429 goto e_free_session;
Brijesh Singhaf43cbb2021-04-15 15:54:50 +00001430 }
1431
Sean Christopherson238eca82021-04-06 15:49:52 -07001432 sev->handle = start.handle;
Brijesh Singhaf43cbb2021-04-15 15:54:50 +00001433 sev->fd = argp->sev_fd;
1434
Brijesh Singhaf43cbb2021-04-15 15:54:50 +00001435e_free_session:
1436 kfree(session_data);
1437e_free_pdh:
1438 kfree(pdh_data);
1439
1440 return ret;
1441}
1442
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001443static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1444{
1445 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1446 struct kvm_sev_receive_update_data params;
Sean Christopherson238eca82021-04-06 15:49:52 -07001447 struct sev_data_receive_update_data data;
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001448 void *hdr = NULL, *trans = NULL;
1449 struct page **guest_page;
1450 unsigned long n;
1451 int ret, offset;
1452
1453 if (!sev_guest(kvm))
1454 return -EINVAL;
1455
1456 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1457 sizeof(struct kvm_sev_receive_update_data)))
1458 return -EFAULT;
1459
1460 if (!params.hdr_uaddr || !params.hdr_len ||
1461 !params.guest_uaddr || !params.guest_len ||
1462 !params.trans_uaddr || !params.trans_len)
1463 return -EINVAL;
1464
1465 /* Check if we are crossing the page boundary */
1466 offset = params.guest_uaddr & (PAGE_SIZE - 1);
1467 if ((params.guest_len + offset > PAGE_SIZE))
1468 return -EINVAL;
1469
1470 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1471 if (IS_ERR(hdr))
1472 return PTR_ERR(hdr);
1473
1474 trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1475 if (IS_ERR(trans)) {
1476 ret = PTR_ERR(trans);
1477 goto e_free_hdr;
1478 }
1479
Sean Christopherson238eca82021-04-06 15:49:52 -07001480 memset(&data, 0, sizeof(data));
1481 data.hdr_address = __psp_pa(hdr);
1482 data.hdr_len = params.hdr_len;
1483 data.trans_address = __psp_pa(trans);
1484 data.trans_len = params.trans_len;
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001485
1486 /* Pin guest memory */
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001487 guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
Sean Christopherson50c03802021-09-14 14:09:50 -07001488 PAGE_SIZE, &n, 1);
Sean Christophersonc7a1b2b2021-05-06 10:58:26 -07001489 if (IS_ERR(guest_page)) {
1490 ret = PTR_ERR(guest_page);
Sean Christopherson238eca82021-04-06 15:49:52 -07001491 goto e_free_trans;
Sean Christophersonc7a1b2b2021-05-06 10:58:26 -07001492 }
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001493
Masahiro Kozukac8c340a2021-09-14 14:09:51 -07001494 /*
1495	 * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA; the PSP
1496 * encrypts the written data with the guest's key, and the cache may
1497 * contain dirty, unencrypted data.
1498 */
1499 sev_clflush_pages(guest_page, n);
1500
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001501	/* The RECEIVE_UPDATE_DATA command requires the C-bit to always be set. */
Sean Christopherson238eca82021-04-06 15:49:52 -07001502 data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1503 data.guest_address |= sev_me_mask;
1504 data.guest_len = params.guest_len;
1505 data.handle = sev->handle;
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001506
Sean Christopherson238eca82021-04-06 15:49:52 -07001507 ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001508 &argp->error);
1509
1510 sev_unpin_memory(kvm, guest_page, n);
1511
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001512e_free_trans:
1513 kfree(trans);
1514e_free_hdr:
1515 kfree(hdr);
1516
1517 return ret;
1518}
1519
Brijesh Singh6a443de2021-04-15 15:55:40 +00001520static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1521{
1522 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -07001523 struct sev_data_receive_finish data;
Brijesh Singh6a443de2021-04-15 15:55:40 +00001524
1525 if (!sev_guest(kvm))
1526 return -ENOTTY;
1527
Sean Christopherson238eca82021-04-06 15:49:52 -07001528 data.handle = sev->handle;
1529 return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
Brijesh Singh6a443de2021-04-15 15:55:40 +00001530}
1531
Sean Christopherson8e38e962021-11-09 21:51:01 +00001532static bool is_cmd_allowed_from_mirror(u32 cmd_id)
Peter Gonda5b92b6c2021-09-21 08:03:45 -07001533{
1534 /*
1535	 * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA so that SEV-ES can
1536	 * be enabled on their vCPUs. Also allow the debugging and status commands.
1537 */
1538 if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
1539 cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
1540 cmd_id == KVM_SEV_DBG_ENCRYPT)
1541 return true;
1542
1543 return false;
1544}
1545
Paolo Bonzini501b5802021-11-22 19:50:29 -05001546static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
Peter Gondab5663932021-10-21 10:43:00 -07001547{
Paolo Bonzini501b5802021-11-22 19:50:29 -05001548 struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
1549 struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
Paolo Bonzinic9d61dc2021-11-22 19:50:36 -05001550 int r = -EBUSY;
Paolo Bonzini501b5802021-11-22 19:50:29 -05001551
1552 if (dst_kvm == src_kvm)
1553 return -EINVAL;
Peter Gondab5663932021-10-21 10:43:00 -07001554
1555 /*
Paolo Bonzini501b5802021-11-22 19:50:29 -05001556 * Bail if these VMs are already involved in a migration to avoid
1557 * deadlock between two VMs trying to migrate to/from each other.
Peter Gondab5663932021-10-21 10:43:00 -07001558 */
Paolo Bonzini501b5802021-11-22 19:50:29 -05001559 if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
Peter Gondab5663932021-10-21 10:43:00 -07001560 return -EBUSY;
1561
Paolo Bonzinic9d61dc2021-11-22 19:50:36 -05001562 if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
1563 goto release_dst;
Peter Gondab5663932021-10-21 10:43:00 -07001564
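	/*
	 * Lock the destination first, then the source; the "nested" class on
	 * the second lock tells lockdep that taking two kvm->lock mutexes here
	 * is an intentional, ordered double lock rather than recursion.
	 */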
Paolo Bonzinic9d61dc2021-11-22 19:50:36 -05001565 r = -EINTR;
1566 if (mutex_lock_killable(&dst_kvm->lock))
1567 goto release_src;
Wanpeng Li597cb792022-01-04 22:41:03 -08001568 if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING))
Paolo Bonzinic9d61dc2021-11-22 19:50:36 -05001569 goto unlock_dst;
Peter Gondab5663932021-10-21 10:43:00 -07001570 return 0;
Paolo Bonzinic9d61dc2021-11-22 19:50:36 -05001571
1572unlock_dst:
1573 mutex_unlock(&dst_kvm->lock);
1574release_src:
1575 atomic_set_release(&src_sev->migration_in_progress, 0);
1576release_dst:
1577 atomic_set_release(&dst_sev->migration_in_progress, 0);
1578 return r;
Peter Gondab5663932021-10-21 10:43:00 -07001579}
1580
Paolo Bonzini501b5802021-11-22 19:50:29 -05001581static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
Peter Gondab5663932021-10-21 10:43:00 -07001582{
Paolo Bonzini501b5802021-11-22 19:50:29 -05001583 struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
1584 struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
Peter Gondab5663932021-10-21 10:43:00 -07001585
Paolo Bonzini501b5802021-11-22 19:50:29 -05001586 mutex_unlock(&dst_kvm->lock);
1587 mutex_unlock(&src_kvm->lock);
1588 atomic_set_release(&dst_sev->migration_in_progress, 0);
1589 atomic_set_release(&src_sev->migration_in_progress, 0);
Peter Gondab5663932021-10-21 10:43:00 -07001590}
1591
1592
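/*
 * Lock every vCPU of @kvm for intra-host migration.  On failure, unwind by
 * unlocking only the vCPUs that were locked before the failing one.
 */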
1593static int sev_lock_vcpus_for_migration(struct kvm *kvm)
1594{
1595 struct kvm_vcpu *vcpu;
1596 int i, j;
1597
1598 kvm_for_each_vcpu(i, vcpu, kvm) {
1599 if (mutex_lock_killable(&vcpu->mutex))
1600 goto out_unlock;
1601 }
1602
1603 return 0;
1604
1605out_unlock:
1606 kvm_for_each_vcpu(j, vcpu, kvm) {
1607 if (i == j)
1608 break;
1609
1610 mutex_unlock(&vcpu->mutex);
1611 }
1612 return -EINTR;
1613}
1614
1615static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
1616{
1617 struct kvm_vcpu *vcpu;
1618 int i;
1619
1620 kvm_for_each_vcpu(i, vcpu, kvm) {
1621 mutex_unlock(&vcpu->mutex);
1622 }
1623}
1624
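/*
 * Move the SEV context (ASID, firmware handle, pinned-page accounting,
 * enc_context_owner and the encrypted region list) from @src to @dst,
 * leaving @src deactivated.
 */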
1625static void sev_migrate_from(struct kvm_sev_info *dst,
1626 struct kvm_sev_info *src)
1627{
1628 dst->active = true;
1629 dst->asid = src->asid;
1630 dst->handle = src->handle;
1631 dst->pages_locked = src->pages_locked;
Paolo Bonzini642525e2021-11-22 19:50:31 -05001632 dst->enc_context_owner = src->enc_context_owner;
Peter Gondab5663932021-10-21 10:43:00 -07001633
1634 src->asid = 0;
1635 src->active = false;
1636 src->handle = 0;
1637 src->pages_locked = 0;
Paolo Bonzini642525e2021-11-22 19:50:31 -05001638 src->enc_context_owner = NULL;
Peter Gondab5663932021-10-21 10:43:00 -07001639
Paolo Bonzini46741642021-11-22 19:50:28 -05001640 list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
Peter Gondab5663932021-10-21 10:43:00 -07001641}
1642
Peter Gonda0b020f52021-10-21 10:43:01 -07001643static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
1644{
1645 int i;
1646 struct kvm_vcpu *dst_vcpu, *src_vcpu;
1647 struct vcpu_svm *dst_svm, *src_svm;
1648
1649 if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
1650 return -EINVAL;
1651
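	/* Every source vCPU must already have its state encrypted and protected. */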
1652 kvm_for_each_vcpu(i, src_vcpu, src) {
1653 if (!src_vcpu->arch.guest_state_protected)
1654 return -EINVAL;
1655 }
1656
1657 kvm_for_each_vcpu(i, src_vcpu, src) {
1658 src_svm = to_svm(src_vcpu);
1659 dst_vcpu = kvm_get_vcpu(dst, i);
1660 dst_svm = to_svm(dst_vcpu);
1661
1662 /*
1663 * Transfer VMSA and GHCB state to the destination. Nullify and
1664 * clear source fields as appropriate, the state now belongs to
1665 * the destination.
1666 */
1667 memcpy(&dst_svm->sev_es, &src_svm->sev_es, sizeof(src_svm->sev_es));
1668 dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
1669 dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa;
1670 dst_vcpu->arch.guest_state_protected = true;
1671
1672 memset(&src_svm->sev_es, 0, sizeof(src_svm->sev_es));
1673 src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE;
1674 src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
1675 src_vcpu->arch.guest_state_protected = false;
1676 }
1677 to_kvm_svm(src)->sev_info.es_active = false;
1678 to_kvm_svm(dst)->sev_info.es_active = true;
1679
1680 return 0;
1681}
1682
Peter Gondab5663932021-10-21 10:43:00 -07001683int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
1684{
1685 struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
Paolo Bonzini501cfe02021-11-12 04:02:24 -05001686 struct kvm_sev_info *src_sev, *cg_cleanup_sev;
Peter Gondab5663932021-10-21 10:43:00 -07001687 struct file *source_kvm_file;
1688 struct kvm *source_kvm;
Paolo Bonzini501cfe02021-11-12 04:02:24 -05001689 bool charged = false;
Peter Gondab5663932021-10-21 10:43:00 -07001690 int ret;
1691
Peter Gondab5663932021-10-21 10:43:00 -07001692 source_kvm_file = fget(source_fd);
1693 if (!file_is_kvm(source_kvm_file)) {
1694 ret = -EBADF;
1695 goto out_fput;
1696 }
1697
1698 source_kvm = source_kvm_file->private_data;
Paolo Bonzini501b5802021-11-22 19:50:29 -05001699 ret = sev_lock_two_vms(kvm, source_kvm);
Peter Gondab5663932021-10-21 10:43:00 -07001700 if (ret)
1701 goto out_fput;
1702
Paolo Bonzini501b5802021-11-22 19:50:29 -05001703 if (sev_guest(kvm) || !sev_guest(source_kvm)) {
Peter Gondab5663932021-10-21 10:43:00 -07001704 ret = -EINVAL;
Paolo Bonzini501b5802021-11-22 19:50:29 -05001705 goto out_unlock;
Peter Gondab5663932021-10-21 10:43:00 -07001706 }
1707
1708 src_sev = &to_kvm_svm(source_kvm)->sev_info;
Paolo Bonzini17d44a92021-11-22 19:50:34 -05001709
1710 /*
1711 * VMs mirroring src's encryption context rely on it to keep the
1712 * ASID allocated, but below we are clearing src_sev->asid.
1713 */
1714 if (src_sev->num_mirrored_vms) {
1715 ret = -EBUSY;
1716 goto out_unlock;
1717 }
1718
Peter Gondab5663932021-10-21 10:43:00 -07001719 dst_sev->misc_cg = get_current_misc_cg();
Paolo Bonzini501cfe02021-11-12 04:02:24 -05001720 cg_cleanup_sev = dst_sev;
Peter Gondab5663932021-10-21 10:43:00 -07001721 if (dst_sev->misc_cg != src_sev->misc_cg) {
1722 ret = sev_misc_cg_try_charge(dst_sev);
1723 if (ret)
Paolo Bonzini501cfe02021-11-12 04:02:24 -05001724 goto out_dst_cgroup;
1725 charged = true;
Peter Gondab5663932021-10-21 10:43:00 -07001726 }
1727
1728 ret = sev_lock_vcpus_for_migration(kvm);
1729 if (ret)
1730 goto out_dst_cgroup;
1731 ret = sev_lock_vcpus_for_migration(source_kvm);
1732 if (ret)
1733 goto out_dst_vcpu;
1734
Peter Gonda0b020f52021-10-21 10:43:01 -07001735 if (sev_es_guest(source_kvm)) {
1736 ret = sev_es_migrate_from(kvm, source_kvm);
1737 if (ret)
1738 goto out_source_vcpu;
1739 }
Peter Gondab5663932021-10-21 10:43:00 -07001740 sev_migrate_from(dst_sev, src_sev);
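	/*
	 * The source has handed over its SEV context; mark it dead so that no
	 * further VM ioctls can be issued against it.
	 */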
1741 kvm_vm_dead(source_kvm);
Paolo Bonzini501cfe02021-11-12 04:02:24 -05001742 cg_cleanup_sev = src_sev;
Peter Gondab5663932021-10-21 10:43:00 -07001743 ret = 0;
1744
Peter Gonda0b020f52021-10-21 10:43:01 -07001745out_source_vcpu:
Peter Gondab5663932021-10-21 10:43:00 -07001746 sev_unlock_vcpus_for_migration(source_kvm);
1747out_dst_vcpu:
1748 sev_unlock_vcpus_for_migration(kvm);
1749out_dst_cgroup:
Paolo Bonzini501cfe02021-11-12 04:02:24 -05001750 /* Operates on the source on success, on the destination on failure. */
1751 if (charged)
1752 sev_misc_cg_uncharge(cg_cleanup_sev);
1753 put_misc_cg(cg_cleanup_sev->misc_cg);
1754 cg_cleanup_sev->misc_cg = NULL;
Paolo Bonzini501b5802021-11-22 19:50:29 -05001755out_unlock:
1756 sev_unlock_two_vms(kvm, source_kvm);
Peter Gondab5663932021-10-21 10:43:00 -07001757out_fput:
1758 if (source_kvm_file)
1759 fput(source_kvm_file);
Peter Gondab5663932021-10-21 10:43:00 -07001760 return ret;
1761}
1762
Joerg Roedeleaf78262020-03-24 10:41:54 +01001763int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
1764{
1765 struct kvm_sev_cmd sev_cmd;
1766 int r;
1767
Sean Christophersona5c1c5a2021-04-21 19:11:23 -07001768 if (!sev_enabled)
Joerg Roedeleaf78262020-03-24 10:41:54 +01001769 return -ENOTTY;
1770
1771 if (!argp)
1772 return 0;
1773
1774 if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
1775 return -EFAULT;
1776
1777 mutex_lock(&kvm->lock);
1778
Peter Gonda5b92b6c2021-09-21 08:03:45 -07001779 /* Only the enc_context_owner handles some memory enc operations. */
1780 if (is_mirroring_enc_context(kvm) &&
Sean Christopherson8e38e962021-11-09 21:51:01 +00001781 !is_cmd_allowed_from_mirror(sev_cmd.id)) {
Nathan Tempelman54526d12021-04-08 22:32:14 +00001782 r = -EINVAL;
1783 goto out;
1784 }
1785
Joerg Roedeleaf78262020-03-24 10:41:54 +01001786 switch (sev_cmd.id) {
Sean Christopherson9fa15212021-03-30 20:19:35 -07001787 case KVM_SEV_ES_INIT:
Sean Christopherson8d364a02021-04-21 19:11:17 -07001788 if (!sev_es_enabled) {
Sean Christopherson9fa15212021-03-30 20:19:35 -07001789 r = -ENOTTY;
1790 goto out;
1791 }
1792 fallthrough;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001793 case KVM_SEV_INIT:
1794 r = sev_guest_init(kvm, &sev_cmd);
1795 break;
1796 case KVM_SEV_LAUNCH_START:
1797 r = sev_launch_start(kvm, &sev_cmd);
1798 break;
1799 case KVM_SEV_LAUNCH_UPDATE_DATA:
1800 r = sev_launch_update_data(kvm, &sev_cmd);
1801 break;
Tom Lendackyad731092020-12-10 11:10:09 -06001802 case KVM_SEV_LAUNCH_UPDATE_VMSA:
1803 r = sev_launch_update_vmsa(kvm, &sev_cmd);
1804 break;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001805 case KVM_SEV_LAUNCH_MEASURE:
1806 r = sev_launch_measure(kvm, &sev_cmd);
1807 break;
1808 case KVM_SEV_LAUNCH_FINISH:
1809 r = sev_launch_finish(kvm, &sev_cmd);
1810 break;
1811 case KVM_SEV_GUEST_STATUS:
1812 r = sev_guest_status(kvm, &sev_cmd);
1813 break;
1814 case KVM_SEV_DBG_DECRYPT:
1815 r = sev_dbg_crypt(kvm, &sev_cmd, true);
1816 break;
1817 case KVM_SEV_DBG_ENCRYPT:
1818 r = sev_dbg_crypt(kvm, &sev_cmd, false);
1819 break;
1820 case KVM_SEV_LAUNCH_SECRET:
1821 r = sev_launch_secret(kvm, &sev_cmd);
1822 break;
Brijesh Singh2c07ded2021-01-04 09:17:49 -06001823 case KVM_SEV_GET_ATTESTATION_REPORT:
1824 r = sev_get_attestation_report(kvm, &sev_cmd);
1825 break;
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001826 case KVM_SEV_SEND_START:
1827 r = sev_send_start(kvm, &sev_cmd);
1828 break;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001829 case KVM_SEV_SEND_UPDATE_DATA:
1830 r = sev_send_update_data(kvm, &sev_cmd);
1831 break;
Brijesh Singhfddecf62021-04-15 15:54:15 +00001832 case KVM_SEV_SEND_FINISH:
1833 r = sev_send_finish(kvm, &sev_cmd);
1834 break;
Steve Rutherford5569e2e2021-04-20 05:01:20 -04001835 case KVM_SEV_SEND_CANCEL:
1836 r = sev_send_cancel(kvm, &sev_cmd);
1837 break;
Brijesh Singhaf43cbb2021-04-15 15:54:50 +00001838 case KVM_SEV_RECEIVE_START:
1839 r = sev_receive_start(kvm, &sev_cmd);
1840 break;
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001841 case KVM_SEV_RECEIVE_UPDATE_DATA:
1842 r = sev_receive_update_data(kvm, &sev_cmd);
1843 break;
Brijesh Singh6a443de2021-04-15 15:55:40 +00001844 case KVM_SEV_RECEIVE_FINISH:
1845 r = sev_receive_finish(kvm, &sev_cmd);
1846 break;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001847 default:
1848 r = -EINVAL;
1849 goto out;
1850 }
1851
1852 if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
1853 r = -EFAULT;
1854
1855out:
1856 mutex_unlock(&kvm->lock);
1857 return r;
1858}
1859
1860int svm_register_enc_region(struct kvm *kvm,
1861 struct kvm_enc_region *range)
1862{
1863 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1864 struct enc_region *region;
1865 int ret = 0;
1866
1867 if (!sev_guest(kvm))
1868 return -ENOTTY;
1869
Nathan Tempelman54526d12021-04-08 22:32:14 +00001870	/* If kvm is mirroring the encryption context, it isn't responsible for it */
1871 if (is_mirroring_enc_context(kvm))
1872 return -EINVAL;
1873
Joerg Roedeleaf78262020-03-24 10:41:54 +01001874 if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
1875 return -EINVAL;
1876
1877 region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
1878 if (!region)
1879 return -ENOMEM;
1880
Peter Gonda19a23da2021-01-27 08:15:24 -08001881 mutex_lock(&kvm->lock);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001882 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
Paolo Bonzinia8d908b2020-06-23 05:12:24 -04001883 if (IS_ERR(region->pages)) {
1884 ret = PTR_ERR(region->pages);
Peter Gonda19a23da2021-01-27 08:15:24 -08001885 mutex_unlock(&kvm->lock);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001886 goto e_free;
1887 }
1888
Peter Gonda19a23da2021-01-27 08:15:24 -08001889 region->uaddr = range->addr;
1890 region->size = range->size;
1891
1892 list_add_tail(&region->list, &sev->regions_list);
1893 mutex_unlock(&kvm->lock);
1894
Joerg Roedeleaf78262020-03-24 10:41:54 +01001895 /*
1896 * The guest may change the memory encryption attribute from C=0 -> C=1
1897	 * or vice versa for this memory range. Make sure caches are flushed
1898	 * so that guest data gets written into memory with the correct
1899	 * C-bit.
1900 */
1901 sev_clflush_pages(region->pages, region->npages);
1902
Joerg Roedeleaf78262020-03-24 10:41:54 +01001903 return ret;
1904
1905e_free:
1906 kfree(region);
1907 return ret;
1908}
1909
1910static struct enc_region *
1911find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
1912{
1913 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1914 struct list_head *head = &sev->regions_list;
1915 struct enc_region *i;
1916
1917 list_for_each_entry(i, head, list) {
1918 if (i->uaddr == range->addr &&
1919 i->size == range->size)
1920 return i;
1921 }
1922
1923 return NULL;
1924}
1925
1926static void __unregister_enc_region_locked(struct kvm *kvm,
1927 struct enc_region *region)
1928{
1929 sev_unpin_memory(kvm, region->pages, region->npages);
1930 list_del(&region->list);
1931 kfree(region);
1932}
1933
1934int svm_unregister_enc_region(struct kvm *kvm,
1935 struct kvm_enc_region *range)
1936{
1937 struct enc_region *region;
1938 int ret;
1939
Nathan Tempelman54526d12021-04-08 22:32:14 +00001940	/* If kvm is mirroring the encryption context, it isn't responsible for it */
1941 if (is_mirroring_enc_context(kvm))
1942 return -EINVAL;
1943
Joerg Roedeleaf78262020-03-24 10:41:54 +01001944 mutex_lock(&kvm->lock);
1945
1946 if (!sev_guest(kvm)) {
1947 ret = -ENOTTY;
1948 goto failed;
1949 }
1950
1951 region = find_enc_region(kvm, range);
1952 if (!region) {
1953 ret = -EINVAL;
1954 goto failed;
1955 }
1956
1957 /*
1958 * Ensure that all guest tagged cache entries are flushed before
1959 * releasing the pages back to the system for use. CLFLUSH will
1960 * not do this, so issue a WBINVD.
1961 */
1962 wbinvd_on_all_cpus();
1963
1964 __unregister_enc_region_locked(kvm, region);
1965
1966 mutex_unlock(&kvm->lock);
1967 return 0;
1968
1969failed:
1970 mutex_unlock(&kvm->lock);
1971 return ret;
1972}
1973
Nathan Tempelman54526d12021-04-08 22:32:14 +00001974int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
1975{
1976 struct file *source_kvm_file;
1977 struct kvm *source_kvm;
Paolo Bonzinibf42b022021-11-22 19:50:33 -05001978 struct kvm_sev_info *source_sev, *mirror_sev;
Nathan Tempelman54526d12021-04-08 22:32:14 +00001979 int ret;
1980
1981 source_kvm_file = fget(source_fd);
1982 if (!file_is_kvm(source_kvm_file)) {
1983 ret = -EBADF;
Paolo Bonzinibf42b022021-11-22 19:50:33 -05001984 goto e_source_fput;
Nathan Tempelman54526d12021-04-08 22:32:14 +00001985 }
1986
1987 source_kvm = source_kvm_file->private_data;
Paolo Bonzinibf42b022021-11-22 19:50:33 -05001988 ret = sev_lock_two_vms(kvm, source_kvm);
1989 if (ret)
1990 goto e_source_fput;
Nathan Tempelman54526d12021-04-08 22:32:14 +00001991
Paolo Bonzinibf42b022021-11-22 19:50:33 -05001992 /*
1993 * Mirrors of mirrors should work, but let's not get silly. Also
1994 * disallow out-of-band SEV/SEV-ES init if the target is already an
1995 * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
1996 * created after SEV/SEV-ES initialization, e.g. to init intercepts.
1997 */
1998 if (sev_guest(kvm) || !sev_guest(source_kvm) ||
1999 is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
Nathan Tempelman54526d12021-04-08 22:32:14 +00002000 ret = -EINVAL;
Paolo Bonzinibf42b022021-11-22 19:50:33 -05002001 goto e_unlock;
Nathan Tempelman54526d12021-04-08 22:32:14 +00002002 }
2003
Nathan Tempelman54526d12021-04-08 22:32:14 +00002004 /*
2005	 * The mirror kvm holds an enc_context_owner reference so its ASID
2006	 * can't disappear until we're done with it.
2007 */
Paolo Bonzinibf42b022021-11-22 19:50:33 -05002008 source_sev = &to_kvm_svm(source_kvm)->sev_info;
Nathan Tempelman54526d12021-04-08 22:32:14 +00002009 kvm_get_kvm(source_kvm);
Paolo Bonzini17d44a92021-11-22 19:50:34 -05002010 source_sev->num_mirrored_vms++;
Nathan Tempelman54526d12021-04-08 22:32:14 +00002011
Nathan Tempelman54526d12021-04-08 22:32:14 +00002012 /* Set enc_context_owner and copy its encryption context over */
2013 mirror_sev = &to_kvm_svm(kvm)->sev_info;
2014 mirror_sev->enc_context_owner = source_kvm;
Nathan Tempelman54526d12021-04-08 22:32:14 +00002015 mirror_sev->active = true;
Paolo Bonzinibf42b022021-11-22 19:50:33 -05002016 mirror_sev->asid = source_sev->asid;
2017 mirror_sev->fd = source_sev->fd;
2018 mirror_sev->es_active = source_sev->es_active;
2019 mirror_sev->handle = source_sev->handle;
Paolo Bonzini2b347a32021-11-22 19:50:30 -05002020 INIT_LIST_HEAD(&mirror_sev->regions_list);
Paolo Bonzinibf42b022021-11-22 19:50:33 -05002021 ret = 0;
2022
Peter Gondaf43c8872021-09-21 08:03:44 -07002023 /*
2024	 * Do not copy ap_jump_table; the mirror does not share the same KVM
2025	 * context as the original, and the two may have different memory
2026	 * views.
2027 */
Nathan Tempelman54526d12021-04-08 22:32:14 +00002028
Paolo Bonzinibf42b022021-11-22 19:50:33 -05002029e_unlock:
2030 sev_unlock_two_vms(kvm, source_kvm);
2031e_source_fput:
Colin Ian King8899a5f2021-04-30 18:03:03 +01002032 if (source_kvm_file)
2033 fput(source_kvm_file);
Nathan Tempelman54526d12021-04-08 22:32:14 +00002034 return ret;
2035}
2036
Joerg Roedeleaf78262020-03-24 10:41:54 +01002037void sev_vm_destroy(struct kvm *kvm)
2038{
2039 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2040 struct list_head *head = &sev->regions_list;
2041 struct list_head *pos, *q;
2042
Paolo Bonzini17d44a92021-11-22 19:50:34 -05002043 WARN_ON(sev->num_mirrored_vms);
2044
Joerg Roedeleaf78262020-03-24 10:41:54 +01002045 if (!sev_guest(kvm))
2046 return;
2047
Nathan Tempelman54526d12021-04-08 22:32:14 +00002048	/* If this is a mirror VM, release the enc_context_owner and skip SEV cleanup */
2049 if (is_mirroring_enc_context(kvm)) {
Paolo Bonzini17d44a92021-11-22 19:50:34 -05002050 struct kvm *owner_kvm = sev->enc_context_owner;
2051 struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info;
2052
2053 mutex_lock(&owner_kvm->lock);
2054 if (!WARN_ON(!owner_sev->num_mirrored_vms))
2055 owner_sev->num_mirrored_vms--;
2056 mutex_unlock(&owner_kvm->lock);
2057 kvm_put_kvm(owner_kvm);
Nathan Tempelman54526d12021-04-08 22:32:14 +00002058 return;
2059 }
2060
Joerg Roedeleaf78262020-03-24 10:41:54 +01002061 /*
2062 * Ensure that all guest tagged cache entries are flushed before
2063 * releasing the pages back to the system for use. CLFLUSH will
2064 * not do this, so issue a WBINVD.
2065 */
2066 wbinvd_on_all_cpus();
2067
2068 /*
2069	 * If userspace was terminated before unregistering the memory regions,
2070	 * unpin all of the registered memory.
2071 */
2072 if (!list_empty(head)) {
2073 list_for_each_safe(pos, q, head) {
2074 __unregister_enc_region_locked(kvm,
2075 list_entry(pos, struct enc_region, list));
David Rientjes7be74942020-08-25 12:56:28 -07002076 cond_resched();
Joerg Roedeleaf78262020-03-24 10:41:54 +01002077 }
2078 }
2079
Joerg Roedeleaf78262020-03-24 10:41:54 +01002080 sev_unbind_asid(kvm, sev->handle);
Vipin Sharma7aef27f2021-03-29 21:42:06 -07002081 sev_asid_free(sev);
Joerg Roedeleaf78262020-03-24 10:41:54 +01002082}
2083
Paolo Bonzinid9db0fd2021-04-21 19:11:15 -07002084void __init sev_set_cpu_caps(void)
2085{
Sean Christopherson8d364a02021-04-21 19:11:17 -07002086 if (!sev_enabled)
Paolo Bonzinid9db0fd2021-04-21 19:11:15 -07002087 kvm_cpu_cap_clear(X86_FEATURE_SEV);
Sean Christopherson8d364a02021-04-21 19:11:17 -07002088 if (!sev_es_enabled)
Paolo Bonzinid9db0fd2021-04-21 19:11:15 -07002089 kvm_cpu_cap_clear(X86_FEATURE_SEV_ES);
2090}
2091
Tom Lendacky916391a2020-12-10 11:09:38 -06002092void __init sev_hardware_setup(void)
Joerg Roedeleaf78262020-03-24 10:41:54 +01002093{
Sean Christophersona479c332021-04-21 19:11:18 -07002094#ifdef CONFIG_KVM_AMD_SEV
Vipin Sharma7aef27f2021-03-29 21:42:06 -07002095 unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
Tom Lendacky916391a2020-12-10 11:09:38 -06002096 bool sev_es_supported = false;
2097 bool sev_supported = false;
2098
Sean Christophersona479c332021-04-21 19:11:18 -07002099 if (!sev_enabled || !npt_enabled)
Sean Christophersone8126bd2021-04-21 19:11:14 -07002100 goto out;
2101
Tom Lendacky916391a2020-12-10 11:09:38 -06002102 /* Does the CPU support SEV? */
2103 if (!boot_cpu_has(X86_FEATURE_SEV))
2104 goto out;
2105
2106 /* Retrieve SEV CPUID information */
2107 cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
2108
Tom Lendacky1edc1452020-12-10 11:09:49 -06002109 /* Set encryption bit location for SEV-ES guests */
2110 sev_enc_bit = ebx & 0x3f;
2111
Joerg Roedeleaf78262020-03-24 10:41:54 +01002112 /* Maximum number of encrypted guests supported simultaneously */
Tom Lendacky916391a2020-12-10 11:09:38 -06002113 max_sev_asid = ecx;
Sean Christopherson8cb756b2021-04-21 19:11:21 -07002114 if (!max_sev_asid)
Tom Lendacky916391a2020-12-10 11:09:38 -06002115 goto out;
Joerg Roedeleaf78262020-03-24 10:41:54 +01002116
2117 /* Minimum ASID value that should be used for SEV guest */
Tom Lendacky916391a2020-12-10 11:09:38 -06002118 min_sev_asid = edx;
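	/* Memory encryption (C-bit) mask; CPUID 0x8000001F[EBX] bits 5:0 give the C-bit position. */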
Brijesh Singhd3d1af82021-04-15 15:53:55 +00002119 sev_me_mask = 1UL << (ebx & 0x3f);
Joerg Roedeleaf78262020-03-24 10:41:54 +01002120
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -07002121 /*
2122 * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
2123 * even though it's never used, so that the bitmap is indexed by the
2124 * actual ASID.
2125 */
2126 nr_asids = max_sev_asid + 1;
2127 sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
Joerg Roedeleaf78262020-03-24 10:41:54 +01002128 if (!sev_asid_bitmap)
Tom Lendacky916391a2020-12-10 11:09:38 -06002129 goto out;
Joerg Roedeleaf78262020-03-24 10:41:54 +01002130
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -07002131 sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
Sean Christophersonf31b88b2021-04-21 19:11:12 -07002132 if (!sev_reclaim_asid_bitmap) {
2133 bitmap_free(sev_asid_bitmap);
2134 sev_asid_bitmap = NULL;
Tom Lendacky916391a2020-12-10 11:09:38 -06002135 goto out;
Sean Christophersonf31b88b2021-04-21 19:11:12 -07002136 }
Joerg Roedeleaf78262020-03-24 10:41:54 +01002137
Vipin Sharma7aef27f2021-03-29 21:42:06 -07002138 sev_asid_count = max_sev_asid - min_sev_asid + 1;
2139 if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count))
2140 goto out;
2141
2142 pr_info("SEV supported: %u ASIDs\n", sev_asid_count);
Tom Lendacky916391a2020-12-10 11:09:38 -06002143 sev_supported = true;
Joerg Roedeleaf78262020-03-24 10:41:54 +01002144
Tom Lendacky916391a2020-12-10 11:09:38 -06002145 /* SEV-ES support requested? */
Sean Christopherson8d364a02021-04-21 19:11:17 -07002146 if (!sev_es_enabled)
Tom Lendacky916391a2020-12-10 11:09:38 -06002147 goto out;
2148
2149 /* Does the CPU support SEV-ES? */
2150 if (!boot_cpu_has(X86_FEATURE_SEV_ES))
2151 goto out;
2152
2153 /* Has the system been allocated ASIDs for SEV-ES? */
2154 if (min_sev_asid == 1)
2155 goto out;
2156
Vipin Sharma7aef27f2021-03-29 21:42:06 -07002157 sev_es_asid_count = min_sev_asid - 1;
2158 if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count))
2159 goto out;
2160
2161 pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count);
Tom Lendacky916391a2020-12-10 11:09:38 -06002162 sev_es_supported = true;
2163
2164out:
Sean Christopherson8d364a02021-04-21 19:11:17 -07002165 sev_enabled = sev_supported;
2166 sev_es_enabled = sev_es_supported;
Sean Christophersona479c332021-04-21 19:11:18 -07002167#endif
Joerg Roedeleaf78262020-03-24 10:41:54 +01002168}
2169
2170void sev_hardware_teardown(void)
2171{
Sean Christophersona5c1c5a2021-04-21 19:11:23 -07002172 if (!sev_enabled)
Paolo Bonzini9ef15302020-04-13 03:20:06 -04002173 return;
2174
Sean Christopherson469bb322021-04-21 19:11:25 -07002175 /* No need to take sev_bitmap_lock, all VMs have been destroyed. */
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -07002176 sev_flush_asids(1, max_sev_asid);
Sean Christopherson469bb322021-04-21 19:11:25 -07002177
Joerg Roedeleaf78262020-03-24 10:41:54 +01002178 bitmap_free(sev_asid_bitmap);
2179 bitmap_free(sev_reclaim_asid_bitmap);
Sean Christopherson469bb322021-04-21 19:11:25 -07002180
Vipin Sharma7aef27f2021-03-29 21:42:06 -07002181 misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
2182 misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
Joerg Roedeleaf78262020-03-24 10:41:54 +01002183}
Joerg Roedeleaf78262020-03-24 10:41:54 +01002184
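/*
 * Allocate this CPU's array tracking the last VMCB run with each SEV ASID;
 * pre_sev_run() uses it to decide when a TLB flush is required.
 */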
Sean Christophersonb95c2212021-04-21 19:11:22 -07002185int sev_cpu_init(struct svm_cpu_data *sd)
2186{
Sean Christophersona5c1c5a2021-04-21 19:11:23 -07002187 if (!sev_enabled)
Sean Christophersonb95c2212021-04-21 19:11:22 -07002188 return 0;
2189
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -07002190 sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
Sean Christophersonb95c2212021-04-21 19:11:22 -07002191 if (!sd->sev_vmcbs)
2192 return -ENOMEM;
2193
2194 return 0;
Joerg Roedeleaf78262020-03-24 10:41:54 +01002195}
2196
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06002197/*
2198 * Pages used by hardware to hold guest encrypted state must be flushed before
2199 * returning them to the system.
2200 */
2201static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
2202 unsigned long len)
2203{
2204 /*
2205	 * If hardware-enforced cache coherency for encrypted mappings of the
2206 * same physical page is supported, nothing to do.
2207 */
2208 if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
2209 return;
2210
2211 /*
2212 * If the VM Page Flush MSR is supported, use it to flush the page
2213 * (using the page virtual address and the guest ASID).
2214 */
2215 if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
2216 struct kvm_sev_info *sev;
2217 unsigned long va_start;
2218 u64 start, stop;
2219
2220 /* Align start and stop to page boundaries. */
2221 va_start = (unsigned long)va;
2222 start = (u64)va_start & PAGE_MASK;
2223 stop = PAGE_ALIGN((u64)va_start + len);
2224
2225 if (start < stop) {
2226 sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
2227
2228 while (start < stop) {
2229 wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
2230 start | sev->asid);
2231
2232 start += PAGE_SIZE;
2233 }
2234
2235 return;
2236 }
2237
2238 WARN(1, "Address overflow, using WBINVD\n");
2239 }
2240
2241 /*
2242 * Hardware should always have one of the above features,
2243 * but if not, use WBINVD and issue a warning.
2244 */
2245 WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
2246 wbinvd_on_all_cpus();
2247}
2248
2249void sev_free_vcpu(struct kvm_vcpu *vcpu)
2250{
2251 struct vcpu_svm *svm;
2252
2253 if (!sev_es_guest(vcpu->kvm))
2254 return;
2255
2256 svm = to_svm(vcpu);
2257
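	/*
	 * If the VMSA was encrypted in place (guest state protected), flush it
	 * before the page is handed back to the allocator.
	 */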
2258 if (vcpu->arch.guest_state_protected)
Peter Gondab67a4cc2021-10-21 10:42:59 -07002259 sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
2260 __free_page(virt_to_page(svm->sev_es.vmsa));
Tom Lendacky8f423a82020-12-10 11:09:53 -06002261
Peter Gondab67a4cc2021-10-21 10:42:59 -07002262 if (svm->sev_es.ghcb_sa_free)
Sean Christophersona6552762021-11-09 22:23:50 +00002263 kvfree(svm->sev_es.ghcb_sa);
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06002264}
2265
Tom Lendacky291bd202020-12-10 11:09:47 -06002266static void dump_ghcb(struct vcpu_svm *svm)
2267{
Peter Gondab67a4cc2021-10-21 10:42:59 -07002268 struct ghcb *ghcb = svm->sev_es.ghcb;
Tom Lendacky291bd202020-12-10 11:09:47 -06002269 unsigned int nbits;
2270
2271 /* Re-use the dump_invalid_vmcb module parameter */
2272 if (!dump_invalid_vmcb) {
2273 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
2274 return;
2275 }
2276
2277 nbits = sizeof(ghcb->save.valid_bitmap) * 8;
2278
2279 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
2280 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
2281 ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
2282 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
2283 ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
2284 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
2285 ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
2286 pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
2287 ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
2288 pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
2289}
2290
2291static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
2292{
2293 struct kvm_vcpu *vcpu = &svm->vcpu;
Peter Gondab67a4cc2021-10-21 10:42:59 -07002294 struct ghcb *ghcb = svm->sev_es.ghcb;
Tom Lendacky291bd202020-12-10 11:09:47 -06002295
2296 /*
2297 * The GHCB protocol so far allows for the following data
2298 * to be returned:
2299 * GPRs RAX, RBX, RCX, RDX
2300 *
Sean Christopherson25009142021-01-22 15:50:47 -08002301 * Copy their values, even if they may not have been written during the
2302 * VM-Exit. It's the guest's responsibility to not consume random data.
Tom Lendacky291bd202020-12-10 11:09:47 -06002303 */
Sean Christopherson25009142021-01-22 15:50:47 -08002304 ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
2305 ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
2306 ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
2307 ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
Tom Lendacky291bd202020-12-10 11:09:47 -06002308}
2309
2310static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
2311{
2312 struct vmcb_control_area *control = &svm->vmcb->control;
2313 struct kvm_vcpu *vcpu = &svm->vcpu;
Peter Gondab67a4cc2021-10-21 10:42:59 -07002314 struct ghcb *ghcb = svm->sev_es.ghcb;
Tom Lendacky291bd202020-12-10 11:09:47 -06002315 u64 exit_code;
2316
2317 /*
2318 * The GHCB protocol so far allows for the following data
2319 * to be supplied:
2320 * GPRs RAX, RBX, RCX, RDX
2321 * XCR0
2322 * CPL
2323 *
2324 * VMMCALL allows the guest to provide extra registers. KVM also
2325 * expects RSI for hypercalls, so include that, too.
2326 *
2327 * Copy their values to the appropriate location if supplied.
2328 */
2329 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
2330
2331 vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
2332 vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
2333 vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
2334 vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
2335 vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
2336
2337 svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
2338
2339 if (ghcb_xcr0_is_valid(ghcb)) {
2340 vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
2341 kvm_update_cpuid_runtime(vcpu);
2342 }
2343
2344 /* Copy the GHCB exit information into the VMCB fields */
2345 exit_code = ghcb_get_sw_exit_code(ghcb);
2346 control->exit_code = lower_32_bits(exit_code);
2347 control->exit_code_hi = upper_32_bits(exit_code);
2348 control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
2349 control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
2350
2351 /* Clear the valid entries fields */
2352 memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
2353}
2354
Tom Lendackyad5b3532021-12-02 12:52:05 -06002355static bool sev_es_validate_vmgexit(struct vcpu_svm *svm)
Tom Lendacky291bd202020-12-10 11:09:47 -06002356{
2357 struct kvm_vcpu *vcpu;
2358 struct ghcb *ghcb;
Tom Lendackyad5b3532021-12-02 12:52:05 -06002359 u64 exit_code;
2360 u64 reason;
Tom Lendacky291bd202020-12-10 11:09:47 -06002361
Peter Gondab67a4cc2021-10-21 10:42:59 -07002362 ghcb = svm->sev_es.ghcb;
Tom Lendacky291bd202020-12-10 11:09:47 -06002363
Tom Lendacky291bd202020-12-10 11:09:47 -06002364 /*
Tom Lendackyad5b3532021-12-02 12:52:05 -06002365 * Retrieve the exit code now even though it may not be marked valid
Tom Lendacky291bd202020-12-10 11:09:47 -06002366 * as it could help with debugging.
2367 */
2368 exit_code = ghcb_get_sw_exit_code(ghcb);
2369
Tom Lendackyad5b3532021-12-02 12:52:05 -06002370 /* Only GHCB Usage code 0 is supported */
2371 if (ghcb->ghcb_usage) {
2372 reason = GHCB_ERR_INVALID_USAGE;
2373 goto vmgexit_err;
2374 }
2375
2376 reason = GHCB_ERR_MISSING_INPUT;
2377
Tom Lendacky291bd202020-12-10 11:09:47 -06002378 if (!ghcb_sw_exit_code_is_valid(ghcb) ||
2379 !ghcb_sw_exit_info_1_is_valid(ghcb) ||
2380 !ghcb_sw_exit_info_2_is_valid(ghcb))
2381 goto vmgexit_err;
2382
2383 switch (ghcb_get_sw_exit_code(ghcb)) {
2384 case SVM_EXIT_READ_DR7:
2385 break;
2386 case SVM_EXIT_WRITE_DR7:
2387 if (!ghcb_rax_is_valid(ghcb))
2388 goto vmgexit_err;
2389 break;
2390 case SVM_EXIT_RDTSC:
2391 break;
2392 case SVM_EXIT_RDPMC:
2393 if (!ghcb_rcx_is_valid(ghcb))
2394 goto vmgexit_err;
2395 break;
2396 case SVM_EXIT_CPUID:
2397 if (!ghcb_rax_is_valid(ghcb) ||
2398 !ghcb_rcx_is_valid(ghcb))
2399 goto vmgexit_err;
2400 if (ghcb_get_rax(ghcb) == 0xd)
2401 if (!ghcb_xcr0_is_valid(ghcb))
2402 goto vmgexit_err;
2403 break;
2404 case SVM_EXIT_INVD:
2405 break;
2406 case SVM_EXIT_IOIO:
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002407 if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
2408 if (!ghcb_sw_scratch_is_valid(ghcb))
Tom Lendacky291bd202020-12-10 11:09:47 -06002409 goto vmgexit_err;
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002410 } else {
2411 if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
2412 if (!ghcb_rax_is_valid(ghcb))
2413 goto vmgexit_err;
2414 }
Tom Lendacky291bd202020-12-10 11:09:47 -06002415 break;
2416 case SVM_EXIT_MSR:
2417 if (!ghcb_rcx_is_valid(ghcb))
2418 goto vmgexit_err;
2419 if (ghcb_get_sw_exit_info_1(ghcb)) {
2420 if (!ghcb_rax_is_valid(ghcb) ||
2421 !ghcb_rdx_is_valid(ghcb))
2422 goto vmgexit_err;
2423 }
2424 break;
2425 case SVM_EXIT_VMMCALL:
2426 if (!ghcb_rax_is_valid(ghcb) ||
2427 !ghcb_cpl_is_valid(ghcb))
2428 goto vmgexit_err;
2429 break;
2430 case SVM_EXIT_RDTSCP:
2431 break;
2432 case SVM_EXIT_WBINVD:
2433 break;
2434 case SVM_EXIT_MONITOR:
2435 if (!ghcb_rax_is_valid(ghcb) ||
2436 !ghcb_rcx_is_valid(ghcb) ||
2437 !ghcb_rdx_is_valid(ghcb))
2438 goto vmgexit_err;
2439 break;
2440 case SVM_EXIT_MWAIT:
2441 if (!ghcb_rax_is_valid(ghcb) ||
2442 !ghcb_rcx_is_valid(ghcb))
2443 goto vmgexit_err;
2444 break;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002445 case SVM_VMGEXIT_MMIO_READ:
2446 case SVM_VMGEXIT_MMIO_WRITE:
2447 if (!ghcb_sw_scratch_is_valid(ghcb))
2448 goto vmgexit_err;
2449 break;
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002450 case SVM_VMGEXIT_NMI_COMPLETE:
Tom Lendacky647daca2021-01-04 14:20:01 -06002451 case SVM_VMGEXIT_AP_HLT_LOOP:
Tom Lendacky8640ca52020-12-15 12:44:07 -05002452 case SVM_VMGEXIT_AP_JUMP_TABLE:
Tom Lendacky291bd202020-12-10 11:09:47 -06002453 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
2454 break;
2455 default:
Tom Lendackyad5b3532021-12-02 12:52:05 -06002456 reason = GHCB_ERR_INVALID_EVENT;
Tom Lendacky291bd202020-12-10 11:09:47 -06002457 goto vmgexit_err;
2458 }
2459
Tom Lendackyad5b3532021-12-02 12:52:05 -06002460 return true;
Tom Lendacky291bd202020-12-10 11:09:47 -06002461
2462vmgexit_err:
2463 vcpu = &svm->vcpu;
2464
Tom Lendackyad5b3532021-12-02 12:52:05 -06002465 if (reason == GHCB_ERR_INVALID_USAGE) {
Tom Lendacky291bd202020-12-10 11:09:47 -06002466 vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
2467 ghcb->ghcb_usage);
Tom Lendackyad5b3532021-12-02 12:52:05 -06002468 } else if (reason == GHCB_ERR_INVALID_EVENT) {
2469 vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
2470 exit_code);
Tom Lendacky291bd202020-12-10 11:09:47 -06002471 } else {
Tom Lendackyad5b3532021-12-02 12:52:05 -06002472 vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
Tom Lendacky291bd202020-12-10 11:09:47 -06002473 exit_code);
2474 dump_ghcb(svm);
2475 }
2476
Tom Lendackyad5b3532021-12-02 12:52:05 -06002477 /* Clear the valid entries fields */
2478 memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
Tom Lendacky291bd202020-12-10 11:09:47 -06002479
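	/*
	 * Report the failure to the guest: 2 in sw_exit_info_1 signals an
	 * error, and sw_exit_info_2 carries the GHCB_ERR_* reason code.
	 */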
Tom Lendackyad5b3532021-12-02 12:52:05 -06002480 ghcb_set_sw_exit_info_1(ghcb, 2);
2481 ghcb_set_sw_exit_info_2(ghcb, reason);
2482
2483 return false;
Tom Lendacky291bd202020-12-10 11:09:47 -06002484}
2485
Tom Lendackyce7ea0c2021-05-06 15:14:41 -05002486void sev_es_unmap_ghcb(struct vcpu_svm *svm)
Tom Lendacky291bd202020-12-10 11:09:47 -06002487{
Peter Gondab67a4cc2021-10-21 10:42:59 -07002488 if (!svm->sev_es.ghcb)
Tom Lendacky291bd202020-12-10 11:09:47 -06002489 return;
2490
Peter Gondab67a4cc2021-10-21 10:42:59 -07002491 if (svm->sev_es.ghcb_sa_free) {
Tom Lendacky8f423a82020-12-10 11:09:53 -06002492 /*
2493 * The scratch area lives outside the GHCB, so there is a
2494 * buffer that, depending on the operation performed, may
2495 * need to be synced, then freed.
2496 */
Peter Gondab67a4cc2021-10-21 10:42:59 -07002497 if (svm->sev_es.ghcb_sa_sync) {
Tom Lendacky8f423a82020-12-10 11:09:53 -06002498 kvm_write_guest(svm->vcpu.kvm,
Peter Gondab67a4cc2021-10-21 10:42:59 -07002499 ghcb_get_sw_scratch(svm->sev_es.ghcb),
2500 svm->sev_es.ghcb_sa,
2501 svm->sev_es.ghcb_sa_len);
2502 svm->sev_es.ghcb_sa_sync = false;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002503 }
2504
Sean Christophersona6552762021-11-09 22:23:50 +00002505 kvfree(svm->sev_es.ghcb_sa);
Peter Gondab67a4cc2021-10-21 10:42:59 -07002506 svm->sev_es.ghcb_sa = NULL;
2507 svm->sev_es.ghcb_sa_free = false;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002508 }
2509
Peter Gondab67a4cc2021-10-21 10:42:59 -07002510 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);
Tom Lendackyd523ab6b2020-12-10 11:09:48 -06002511
Tom Lendacky291bd202020-12-10 11:09:47 -06002512 sev_es_sync_to_ghcb(svm);
2513
Peter Gondab67a4cc2021-10-21 10:42:59 -07002514 kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
2515 svm->sev_es.ghcb = NULL;
Tom Lendacky291bd202020-12-10 11:09:47 -06002516}
2517
Joerg Roedeleaf78262020-03-24 10:41:54 +01002518void pre_sev_run(struct vcpu_svm *svm, int cpu)
2519{
2520 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2521 int asid = sev_get_asid(svm->vcpu.kvm);
2522
2523	/* Assign the ASID allocated to this SEV guest */
Paolo Bonzinidee734a2020-11-30 09:39:59 -05002524 svm->asid = asid;
Joerg Roedeleaf78262020-03-24 10:41:54 +01002525
2526 /*
2527 * Flush guest TLB:
2528 *
2529	 * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
2530	 * 2) this VMCB was executed on a different host CPU in previous VMRUNs.
2531 */
2532 if (sd->sev_vmcbs[asid] == svm->vmcb &&
Jim Mattson8a14fe42020-06-03 16:56:22 -07002533 svm->vcpu.arch.last_vmentry_cpu == cpu)
Joerg Roedeleaf78262020-03-24 10:41:54 +01002534 return;
2535
Joerg Roedeleaf78262020-03-24 10:41:54 +01002536 sd->sev_vmcbs[asid] = svm->vmcb;
2537 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
Joerg Roedel06e78522020-06-25 10:03:23 +02002538 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
Joerg Roedeleaf78262020-03-24 10:41:54 +01002539}
Tom Lendacky291bd202020-12-10 11:09:47 -06002540
Tom Lendacky8f423a82020-12-10 11:09:53 -06002541#define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
Tom Lendackyad5b3532021-12-02 12:52:05 -06002542static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
Tom Lendacky8f423a82020-12-10 11:09:53 -06002543{
2544 struct vmcb_control_area *control = &svm->vmcb->control;
Peter Gondab67a4cc2021-10-21 10:42:59 -07002545 struct ghcb *ghcb = svm->sev_es.ghcb;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002546 u64 ghcb_scratch_beg, ghcb_scratch_end;
2547 u64 scratch_gpa_beg, scratch_gpa_end;
2548 void *scratch_va;
2549
2550 scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
2551 if (!scratch_gpa_beg) {
2552 pr_err("vmgexit: scratch gpa not provided\n");
Tom Lendackyad5b3532021-12-02 12:52:05 -06002553 goto e_scratch;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002554 }
2555
2556 scratch_gpa_end = scratch_gpa_beg + len;
2557 if (scratch_gpa_end < scratch_gpa_beg) {
2558 pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
2559 len, scratch_gpa_beg);
Tom Lendackyad5b3532021-12-02 12:52:05 -06002560 goto e_scratch;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002561 }
2562
2563 if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
2564 /* Scratch area begins within GHCB */
2565 ghcb_scratch_beg = control->ghcb_gpa +
2566 offsetof(struct ghcb, shared_buffer);
2567 ghcb_scratch_end = control->ghcb_gpa +
2568 offsetof(struct ghcb, reserved_1);
2569
2570 /*
2571 * If the scratch area begins within the GHCB, it must be
2572 * completely contained in the GHCB shared buffer area.
2573 */
2574 if (scratch_gpa_beg < ghcb_scratch_beg ||
2575 scratch_gpa_end > ghcb_scratch_end) {
2576 pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
2577 scratch_gpa_beg, scratch_gpa_end);
Tom Lendackyad5b3532021-12-02 12:52:05 -06002578 goto e_scratch;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002579 }
2580
Peter Gondab67a4cc2021-10-21 10:42:59 -07002581 scratch_va = (void *)svm->sev_es.ghcb;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002582 scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
2583 } else {
2584 /*
2585 * The guest memory must be read into a kernel buffer, so
2586		 * limit the size.
2587 */
2588 if (len > GHCB_SCRATCH_AREA_LIMIT) {
2589 pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
2590 len, GHCB_SCRATCH_AREA_LIMIT);
Tom Lendackyad5b3532021-12-02 12:52:05 -06002591 goto e_scratch;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002592 }
Sean Christophersona6552762021-11-09 22:23:50 +00002593 scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
Tom Lendacky8f423a82020-12-10 11:09:53 -06002594 if (!scratch_va)
Tom Lendackyad5b3532021-12-02 12:52:05 -06002595 goto e_scratch;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002596
2597 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
2598 /* Unable to copy scratch area from guest */
2599 pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
2600
Sean Christophersona6552762021-11-09 22:23:50 +00002601 kvfree(scratch_va);
Tom Lendackyad5b3532021-12-02 12:52:05 -06002602 goto e_scratch;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002603 }
2604
2605 /*
2606 * The scratch area is outside the GHCB. The operation will
2607 * dictate whether the buffer needs to be synced before running
2608 * the vCPU next time (i.e. a read was requested so the data
2609 * must be written back to the guest memory).
2610 */
Peter Gondab67a4cc2021-10-21 10:42:59 -07002611 svm->sev_es.ghcb_sa_sync = sync;
2612 svm->sev_es.ghcb_sa_free = true;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002613 }
2614
Peter Gondab67a4cc2021-10-21 10:42:59 -07002615 svm->sev_es.ghcb_sa = scratch_va;
2616 svm->sev_es.ghcb_sa_len = len;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002617
Tom Lendackyad5b3532021-12-02 12:52:05 -06002618 return true;
2619
2620e_scratch:
2621 ghcb_set_sw_exit_info_1(ghcb, 2);
2622 ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
2623
2624 return false;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002625}
2626
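/*
 * Helpers for the GHCB MSR protocol: request and response fields are packed
 * into bit ranges of the GHCB MSR value, which KVM mirrors in
 * vmcb->control.ghcb_gpa.
 */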
Tom Lendackyd3694662020-12-10 11:09:50 -06002627static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
2628 unsigned int pos)
2629{
2630 svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
2631 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
2632}
2633
2634static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
2635{
2636 return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
2637}
2638
Tom Lendacky1edc1452020-12-10 11:09:49 -06002639static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
2640{
2641 svm->vmcb->control.ghcb_gpa = value;
2642}
2643
Tom Lendacky291bd202020-12-10 11:09:47 -06002644static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
2645{
Tom Lendacky1edc1452020-12-10 11:09:49 -06002646 struct vmcb_control_area *control = &svm->vmcb->control;
Tom Lendackyd3694662020-12-10 11:09:50 -06002647 struct kvm_vcpu *vcpu = &svm->vcpu;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002648 u64 ghcb_info;
Tom Lendackyd3694662020-12-10 11:09:50 -06002649 int ret = 1;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002650
2651 ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;
2652
Tom Lendacky59e38b52020-12-10 11:09:52 -06002653 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
2654 control->ghcb_gpa);
2655
Tom Lendacky1edc1452020-12-10 11:09:49 -06002656 switch (ghcb_info) {
2657 case GHCB_MSR_SEV_INFO_REQ:
2658 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2659 GHCB_VERSION_MIN,
2660 sev_enc_bit));
2661 break;
Tom Lendackyd3694662020-12-10 11:09:50 -06002662 case GHCB_MSR_CPUID_REQ: {
2663 u64 cpuid_fn, cpuid_reg, cpuid_value;
2664
2665 cpuid_fn = get_ghcb_msr_bits(svm,
2666 GHCB_MSR_CPUID_FUNC_MASK,
2667 GHCB_MSR_CPUID_FUNC_POS);
2668
2669 /* Initialize the registers needed by the CPUID intercept */
2670 vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
2671 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2672
Paolo Bonzini63129752021-03-02 14:40:39 -05002673 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
Tom Lendackyd3694662020-12-10 11:09:50 -06002674 if (!ret) {
Tom Lendackyad5b3532021-12-02 12:52:05 -06002675 /* Error, keep GHCB MSR value as-is */
Tom Lendackyd3694662020-12-10 11:09:50 -06002676 break;
2677 }
2678
2679 cpuid_reg = get_ghcb_msr_bits(svm,
2680 GHCB_MSR_CPUID_REG_MASK,
2681 GHCB_MSR_CPUID_REG_POS);
2682 if (cpuid_reg == 0)
2683 cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
2684 else if (cpuid_reg == 1)
2685 cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
2686 else if (cpuid_reg == 2)
2687 cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
2688 else
2689 cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];
2690
2691 set_ghcb_msr_bits(svm, cpuid_value,
2692 GHCB_MSR_CPUID_VALUE_MASK,
2693 GHCB_MSR_CPUID_VALUE_POS);
2694
2695 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
2696 GHCB_MSR_INFO_MASK,
2697 GHCB_MSR_INFO_POS);
2698 break;
2699 }
Tom Lendackye1d71112020-12-10 11:09:51 -06002700 case GHCB_MSR_TERM_REQ: {
2701 u64 reason_set, reason_code;
2702
2703 reason_set = get_ghcb_msr_bits(svm,
2704 GHCB_MSR_TERM_REASON_SET_MASK,
2705 GHCB_MSR_TERM_REASON_SET_POS);
2706 reason_code = get_ghcb_msr_bits(svm,
2707 GHCB_MSR_TERM_REASON_MASK,
2708 GHCB_MSR_TERM_REASON_POS);
2709 pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
2710 reason_set, reason_code);
Tom Lendackyad5b3532021-12-02 12:52:05 -06002711
2712 ret = -EINVAL;
2713 break;
Tom Lendackye1d71112020-12-10 11:09:51 -06002714 }
Tom Lendacky1edc1452020-12-10 11:09:49 -06002715 default:
Tom Lendackyad5b3532021-12-02 12:52:05 -06002716 /* Error, keep GHCB MSR value as-is */
2717 break;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002718 }
2719
Tom Lendacky59e38b52020-12-10 11:09:52 -06002720 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
2721 control->ghcb_gpa, ret);
2722
Tom Lendackyd3694662020-12-10 11:09:50 -06002723 return ret;
Tom Lendacky291bd202020-12-10 11:09:47 -06002724}
2725
Paolo Bonzini63129752021-03-02 14:40:39 -05002726int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
Tom Lendacky291bd202020-12-10 11:09:47 -06002727{
Paolo Bonzini63129752021-03-02 14:40:39 -05002728 struct vcpu_svm *svm = to_svm(vcpu);
Tom Lendacky291bd202020-12-10 11:09:47 -06002729 struct vmcb_control_area *control = &svm->vmcb->control;
2730 u64 ghcb_gpa, exit_code;
2731 struct ghcb *ghcb;
2732 int ret;
2733
2734 /* Validate the GHCB */
2735 ghcb_gpa = control->ghcb_gpa;
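	/*
	 * A non-zero GHCBInfo field (low bits) means this is a GHCB MSR
	 * protocol request rather than the GPA of a mapped GHCB page.
	 */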
2736 if (ghcb_gpa & GHCB_MSR_INFO_MASK)
2737 return sev_handle_vmgexit_msr_protocol(svm);
2738
2739 if (!ghcb_gpa) {
Paolo Bonzini63129752021-03-02 14:40:39 -05002740 vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
Tom Lendackyad5b3532021-12-02 12:52:05 -06002741
2742 /* Without a GHCB, just return right back to the guest */
2743 return 1;
Tom Lendacky291bd202020-12-10 11:09:47 -06002744 }
2745
Peter Gondab67a4cc2021-10-21 10:42:59 -07002746 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
Tom Lendacky291bd202020-12-10 11:09:47 -06002747 /* Unable to map GHCB from guest */
Paolo Bonzini63129752021-03-02 14:40:39 -05002748 vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
Tom Lendacky291bd202020-12-10 11:09:47 -06002749 ghcb_gpa);
Tom Lendackyad5b3532021-12-02 12:52:05 -06002750
2751 /* Without a GHCB, just return right back to the guest */
2752 return 1;
Tom Lendacky291bd202020-12-10 11:09:47 -06002753 }
2754
Peter Gondab67a4cc2021-10-21 10:42:59 -07002755 svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
2756 ghcb = svm->sev_es.ghcb_map.hva;
Tom Lendacky291bd202020-12-10 11:09:47 -06002757
Paolo Bonzini63129752021-03-02 14:40:39 -05002758 trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
Tom Lendackyd523ab6b2020-12-10 11:09:48 -06002759
Tom Lendacky291bd202020-12-10 11:09:47 -06002760 exit_code = ghcb_get_sw_exit_code(ghcb);
2761
Tom Lendackyad5b3532021-12-02 12:52:05 -06002762 if (!sev_es_validate_vmgexit(svm))
2763 return 1;
Tom Lendacky291bd202020-12-10 11:09:47 -06002764
2765 sev_es_sync_from_ghcb(svm);
2766 ghcb_set_sw_exit_info_1(ghcb, 0);
2767 ghcb_set_sw_exit_info_2(ghcb, 0);
2768
Tom Lendackyad5b3532021-12-02 12:52:05 -06002769 ret = 1;
Tom Lendacky291bd202020-12-10 11:09:47 -06002770 switch (exit_code) {
Tom Lendacky8f423a82020-12-10 11:09:53 -06002771 case SVM_VMGEXIT_MMIO_READ:
Tom Lendackyad5b3532021-12-02 12:52:05 -06002772 if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
Tom Lendacky8f423a82020-12-10 11:09:53 -06002773 break;
2774
Paolo Bonzini63129752021-03-02 14:40:39 -05002775 ret = kvm_sev_es_mmio_read(vcpu,
Tom Lendacky8f423a82020-12-10 11:09:53 -06002776 control->exit_info_1,
2777 control->exit_info_2,
Peter Gondab67a4cc2021-10-21 10:42:59 -07002778 svm->sev_es.ghcb_sa);
Tom Lendacky8f423a82020-12-10 11:09:53 -06002779 break;
2780 case SVM_VMGEXIT_MMIO_WRITE:
Tom Lendackyad5b3532021-12-02 12:52:05 -06002781 if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
Tom Lendacky8f423a82020-12-10 11:09:53 -06002782 break;
2783
Paolo Bonzini63129752021-03-02 14:40:39 -05002784 ret = kvm_sev_es_mmio_write(vcpu,
Tom Lendacky8f423a82020-12-10 11:09:53 -06002785 control->exit_info_1,
2786 control->exit_info_2,
Peter Gondab67a4cc2021-10-21 10:42:59 -07002787 svm->sev_es.ghcb_sa);
Tom Lendacky8f423a82020-12-10 11:09:53 -06002788 break;
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002789 case SVM_VMGEXIT_NMI_COMPLETE:
Paolo Bonzini63129752021-03-02 14:40:39 -05002790 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002791 break;
Tom Lendacky647daca2021-01-04 14:20:01 -06002792 case SVM_VMGEXIT_AP_HLT_LOOP:
Paolo Bonzini63129752021-03-02 14:40:39 -05002793 ret = kvm_emulate_ap_reset_hold(vcpu);
Tom Lendacky647daca2021-01-04 14:20:01 -06002794 break;
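	/*
	 * The AP jump table address is set/retrieved on behalf of the guest
	 * firmware so that APs coming out of reset know where to resume;
	 * exit_info_1 selects the operation, per the GHCB specification.
	 */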
Tom Lendacky8640ca52020-12-15 12:44:07 -05002795 case SVM_VMGEXIT_AP_JUMP_TABLE: {
Paolo Bonzini63129752021-03-02 14:40:39 -05002796 struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
Tom Lendacky8640ca52020-12-15 12:44:07 -05002797
2798 switch (control->exit_info_1) {
2799 case 0:
2800 /* Set AP jump table address */
2801 sev->ap_jump_table = control->exit_info_2;
2802 break;
2803 case 1:
2804 /* Get AP jump table address */
2805 ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
2806 break;
2807 default:
2808 pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
2809 control->exit_info_1);
Tom Lendackyad5b3532021-12-02 12:52:05 -06002810 ghcb_set_sw_exit_info_1(ghcb, 2);
2811 ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
Tom Lendacky8640ca52020-12-15 12:44:07 -05002812 }
2813
Tom Lendacky8640ca52020-12-15 12:44:07 -05002814 break;
2815 }
Tom Lendacky291bd202020-12-10 11:09:47 -06002816 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
Paolo Bonzini63129752021-03-02 14:40:39 -05002817 vcpu_unimpl(vcpu,
Tom Lendacky291bd202020-12-10 11:09:47 -06002818 "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
2819 control->exit_info_1, control->exit_info_2);
Sean Christopherson75236f52021-11-09 22:23:49 +00002820 ret = -EINVAL;
Tom Lendacky291bd202020-12-10 11:09:47 -06002821 break;
2822 default:
Paolo Bonzini63129752021-03-02 14:40:39 -05002823 ret = svm_invoke_exit_handler(vcpu, exit_code);
Tom Lendacky291bd202020-12-10 11:09:47 -06002824 }
2825
2826 return ret;
2827}
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002828
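/*
 * Emulate a string I/O request from an SEV-ES guest: the repeat count comes
 * from exit_info_2 and the data is transferred through the GHCB shared
 * scratch buffer.
 */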
2829int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
2830{
Paolo Bonzini9b0971c2021-10-25 12:14:31 -04002831 int count;
2832 int bytes;
2833
2834 if (svm->vmcb->control.exit_info_2 > INT_MAX)
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002835 return -EINVAL;
2836
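	/*
	 * exit_info_2 holds the repeat count; it must fit in an int, and
	 * count * size must not overflow an int either.
	 */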
Paolo Bonzini9b0971c2021-10-25 12:14:31 -04002837 count = svm->vmcb->control.exit_info_2;
2838 if (unlikely(check_mul_overflow(count, size, &bytes)))
2839 return -EINVAL;
2840
Tom Lendackyad5b3532021-12-02 12:52:05 -06002841 if (!setup_vmgexit_scratch(svm, in, bytes))
2842 return 1;
Paolo Bonzini9b0971c2021-10-25 12:14:31 -04002843
Peter Gondab67a4cc2021-10-21 10:42:59 -07002844 return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
Paolo Bonzini1f058332021-11-11 10:52:26 -05002845 count, in);
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002846}
Tom Lendacky376c6d22020-12-10 11:10:06 -06002847
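/*
 * SEV-ES specific VMCB setup: point the VMCB at the separate encrypted VMSA,
 * drop intercepts the hypervisor can no longer service (CR accesses, XSETBV,
 * selected MSRs) and use the post-completion TRAP intercepts to track
 * EFER/CR changes instead.
 */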
2848void sev_es_init_vmcb(struct vcpu_svm *svm)
2849{
2850 struct kvm_vcpu *vcpu = &svm->vcpu;
2851
2852 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
2853 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
2854
2855 /*
2856	 * An SEV-ES guest requires a VMSA area that is separate from the
2857 * VMCB page. Do not include the encryption mask on the VMSA physical
2858 * address since hardware will access it using the guest key.
2859 */
Peter Gondab67a4cc2021-10-21 10:42:59 -07002860 svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
Tom Lendacky376c6d22020-12-10 11:10:06 -06002861
2862 /* Can't intercept CR register access, HV can't modify CR registers */
2863 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
2864 svm_clr_intercept(svm, INTERCEPT_CR4_READ);
2865 svm_clr_intercept(svm, INTERCEPT_CR8_READ);
2866 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
2867 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
2868 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
2869
2870 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);
2871
2872 /* Track EFER/CR register changes */
2873 svm_set_intercept(svm, TRAP_EFER_WRITE);
2874 svm_set_intercept(svm, TRAP_CR0_WRITE);
2875 svm_set_intercept(svm, TRAP_CR4_WRITE);
2876 svm_set_intercept(svm, TRAP_CR8_WRITE);
2877
2878 /* No support for enable_vmware_backdoor */
2879 clr_exception_intercept(svm, GP_VECTOR);
2880
2881 /* Can't intercept XSETBV, HV can't modify XCR0 directly */
2882 svm_clr_intercept(svm, INTERCEPT_XSETBV);
2883
2884 /* Clear intercepts on selected MSRs */
2885 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
2886 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
2887 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
2888 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
2889 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
2890 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
2891}
2892
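/*
 * Advertise the supported GHCB protocol version range and the guest's
 * encryption bit position through the GHCB MSR at vCPU RESET.
 */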
Sean Christopherson9ebe5302021-09-20 17:03:02 -07002893void sev_es_vcpu_reset(struct vcpu_svm *svm)
Tom Lendacky376c6d22020-12-10 11:10:06 -06002894{
2895 /*
Sean Christopherson9ebe5302021-09-20 17:03:02 -07002896 * Set the GHCB MSR value as per the GHCB specification when emulating
2897 * vCPU RESET for an SEV-ES guest.
Tom Lendacky376c6d22020-12-10 11:10:06 -06002898 */
2899 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2900 GHCB_VERSION_MIN,
2901 sev_enc_bit));
2902}
Tom Lendacky86137772020-12-10 11:10:07 -06002903
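/*
 * Stash the host state that hardware restores on VMEXIT from an SEV-ES guest
 * but does not itself save on VMRUN: the VMLOAD portion of the host save
 * area plus XCR0, PKRU and XSS.
 */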
Michael Rotha7fc06d2021-02-02 13:01:26 -06002904void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
Tom Lendacky86137772020-12-10 11:10:07 -06002905{
2906 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2907 struct vmcb_save_area *hostsa;
Tom Lendacky86137772020-12-10 11:10:07 -06002908
2909 /*
2910	 * When running an SEV-ES guest, hardware restores the host state on
2911	 * VMEXIT, one step of which is to perform a VMLOAD. Since hardware does not
2912 * perform a VMSAVE on VMRUN, the host savearea must be updated.
2913 */
Sean Christopherson35a78312020-12-30 16:27:00 -08002914 vmsave(__sme_page_pa(sd->save_area));
Tom Lendacky86137772020-12-10 11:10:07 -06002915
Tom Lendacky86137772020-12-10 11:10:07 -06002916 /* XCR0 is restored on VMEXIT, save the current host value */
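	/* The save area of a VMCB page begins at offset 0x400, past the control area. */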
2917 hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
2918 hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
2919
Ingo Molnard9f6e122021-03-18 15:28:01 +01002920 /* PKRU is restored on VMEXIT, save the current host value */
Tom Lendacky86137772020-12-10 11:10:07 -06002921 hostsa->pkru = read_pkru();
2922
2923	/* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
2924 hostsa->xss = host_xss;
2925}
2926
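/*
 * SIPI delivery for SEV-ES: the first SIPI uses the VMM-provided VMSA state;
 * later SIPIs complete the AP Reset Hold VMGEXIT by setting a non-zero
 * SW_EXIT_INFO_2 in the GHCB so the guest sets CS and RIP itself.
 */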
Tom Lendacky647daca2021-01-04 14:20:01 -06002927void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
2928{
2929 struct vcpu_svm *svm = to_svm(vcpu);
2930
2931 /* First SIPI: Use the values as initially set by the VMM */
Peter Gondab67a4cc2021-10-21 10:42:59 -07002932 if (!svm->sev_es.received_first_sipi) {
2933 svm->sev_es.received_first_sipi = true;
Tom Lendacky647daca2021-01-04 14:20:01 -06002934 return;
2935 }
2936
2937 /*
2938 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
2939 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
2940 * non-zero value.
2941 */
Peter Gondab67a4cc2021-10-21 10:42:59 -07002942 if (!svm->sev_es.ghcb)
Tom Lendackya3ba26e2021-04-09 09:38:42 -05002943 return;
2944
Peter Gondab67a4cc2021-10-21 10:42:59 -07002945 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
Tom Lendacky647daca2021-01-04 14:20:01 -06002946}