// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/misc_cgroup.h>
#include <linux/processor.h>
#include <linux/trace_events.h>
#include <asm/fpu/internal.h>

#include <asm/trapnr.h>

#include "x86.h"
#include "svm.h"
#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

#ifndef CONFIG_KVM_AMD_SEV
/*
 * When this config is not defined, SEV feature is not supported and APIs in
 * this file are not used but this file still gets compiled into the KVM AMD
 * module.
 *
 * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum
 * misc_res_type {} defined in linux/misc_cgroup.h.
 *
 * The macros below allow compilation to succeed.
 */
#define MISC_CG_RES_SEV MISC_CG_RES_TYPES
#define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES
#endif

#ifdef CONFIG_KVM_AMD_SEV
/* enable/disable SEV support */
static bool sev_enabled = true;
module_param_named(sev, sev_enabled, bool, 0444);

/* enable/disable SEV-ES support */
static bool sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);
#else
#define sev_enabled false
#define sev_es_enabled false
#endif /* CONFIG_KVM_AMD_SEV */

static u8 sev_enc_bit;
static int sev_flush_asids(void);
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long sev_me_mask;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;

struct enc_region {
        struct list_head list;
        unsigned long npages;
        struct page **pages;
        unsigned long uaddr;
        unsigned long size;
};

static int sev_flush_asids(void)
{
        int ret, error = 0;

        /*
         * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
         * so it must be guarded.
         */
        down_write(&sev_deactivate_lock);

        wbinvd_on_all_cpus();
        ret = sev_guest_df_flush(&error);

        up_write(&sev_deactivate_lock);

        if (ret)
                pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

        return ret;
}

static inline bool is_mirroring_enc_context(struct kvm *kvm)
{
        return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(int min_asid, int max_asid)
{
        int pos;

        /* Check if there are any ASIDs to reclaim before performing a flush */
        pos = find_next_bit(sev_reclaim_asid_bitmap, max_sev_asid, min_asid);
        if (pos >= max_asid)
                return false;

        if (sev_flush_asids())
                return false;

        /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
        bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
                   max_sev_asid);
        bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);

        return true;
}
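
/*
 * For illustration: a freed ASID stays set in sev_asid_bitmap and is also
 * set in sev_reclaim_asid_bitmap, so the bitmap_xor() above clears exactly
 * the reclaimed bits.  E.g. with four ASIDs (LSB = ASID 1):
 *
 *      sev_asid_bitmap         = 1011  (ASIDs 1, 2 and 4 in use)
 *      sev_reclaim_asid_bitmap = 0010  (ASID 2 freed, pending flush)
 *      XOR result              = 1001  (ASID 2 may be allocated again)
 */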

static int sev_asid_new(struct kvm_sev_info *sev)
{
        int pos, min_asid, max_asid, ret;
        bool retry = true;
        enum misc_res_type type;

        type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
        WARN_ON(sev->misc_cg);
        sev->misc_cg = get_current_misc_cg();
        ret = misc_cg_try_charge(type, sev->misc_cg, 1);
        if (ret) {
                put_misc_cg(sev->misc_cg);
                sev->misc_cg = NULL;
                return ret;
        }

        mutex_lock(&sev_bitmap_lock);

        /*
         * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
         * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
         */
        min_asid = sev->es_active ? 0 : min_sev_asid - 1;
        max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
again:
        pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
        if (pos >= max_asid) {
                if (retry && __sev_recycle_asids(min_asid, max_asid)) {
                        retry = false;
                        goto again;
                }
                mutex_unlock(&sev_bitmap_lock);
                ret = -EBUSY;
                goto e_uncharge;
        }

        __set_bit(pos, sev_asid_bitmap);

        mutex_unlock(&sev_bitmap_lock);

        return pos + 1;
e_uncharge:
        misc_cg_uncharge(type, sev->misc_cg, 1);
        put_misc_cg(sev->misc_cg);
        sev->misc_cg = NULL;
        return ret;
}
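
/*
 * For instance, if min_sev_asid is 100 and max_sev_asid is 509, SEV-ES
 * guests draw from ASIDs 1-99 (bitmap positions 0-98) and plain SEV guests
 * from ASIDs 100-509 (positions 99-508); the returned ASID is pos + 1.
 */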

static int sev_get_asid(struct kvm *kvm)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->asid;
}

static void sev_asid_free(struct kvm_sev_info *sev)
{
        struct svm_cpu_data *sd;
        int cpu, pos;
        enum misc_res_type type;

        mutex_lock(&sev_bitmap_lock);

        pos = sev->asid - 1;
        __set_bit(pos, sev_reclaim_asid_bitmap);

        for_each_possible_cpu(cpu) {
                sd = per_cpu(svm_data, cpu);
                sd->sev_vmcbs[pos] = NULL;
        }

        mutex_unlock(&sev_bitmap_lock);

        type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
        misc_cg_uncharge(type, sev->misc_cg, 1);
        put_misc_cg(sev->misc_cg);
        sev->misc_cg = NULL;
}

static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
        struct sev_data_decommission decommission;
        struct sev_data_deactivate deactivate;

        if (!handle)
                return;

        deactivate.handle = handle;

        /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
        down_read(&sev_deactivate_lock);
        sev_guest_deactivate(&deactivate, NULL);
        up_read(&sev_deactivate_lock);

        /* decommission handle */
        decommission.handle = handle;
        sev_guest_decommission(&decommission, NULL);
}
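
/*
 * Note the lock pairing with sev_flush_asids(): DF_FLUSH takes
 * sev_deactivate_lock for write because a concurrent DEACTIVATE would clear
 * the WBINVD indicator, while DEACTIVATE callers take it for read, so
 * multiple guests can be unbound concurrently but never across a flush.
 */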

static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        bool es_active = argp->id == KVM_SEV_ES_INIT;
        int asid, ret;

        if (kvm->created_vcpus)
                return -EINVAL;

        ret = -EBUSY;
        if (unlikely(sev->active))
                return ret;

        sev->es_active = es_active;
        asid = sev_asid_new(sev);
        if (asid < 0)
                goto e_no_asid;
        sev->asid = asid;

        ret = sev_platform_init(&argp->error);
        if (ret)
                goto e_free;

        sev->active = true;
        sev->asid = asid;
        INIT_LIST_HEAD(&sev->regions_list);

        return 0;

e_free:
        sev_asid_free(sev);
        sev->asid = 0;
e_no_asid:
        sev->es_active = false;
        return ret;
}
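
/*
 * A minimal userspace sketch of reaching this handler (error handling
 * elided; struct kvm_sev_cmd is the <linux/kvm.h> uapi type):
 *
 *      struct kvm_sev_cmd cmd = {
 *              .id     = KVM_SEV_INIT,         // or KVM_SEV_ES_INIT
 *              .sev_fd = open("/dev/sev", O_RDWR),
 *      };
 *      ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 *
 * Per the created_vcpus check above, this must happen before any vCPU is
 * created.
 */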

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
        struct sev_data_activate activate;
        int asid = sev_get_asid(kvm);
        int ret;

        /* activate ASID on the given handle */
        activate.handle = handle;
        activate.asid = asid;
        ret = sev_guest_activate(&activate, error);

        return ret;
}

static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
        struct fd f;
        int ret;

        f = fdget(fd);
        if (!f.file)
                return -EBADF;

        ret = sev_issue_cmd_external_user(f.file, id, data, error);

        fdput(f);
        return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return __sev_issue_cmd(sev->fd, id, data, error);
}

static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_start start;
        struct kvm_sev_launch_start params;
        void *dh_blob, *session_blob;
        int *error = &argp->error;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
                return -EFAULT;

        memset(&start, 0, sizeof(start));

        dh_blob = NULL;
        if (params.dh_uaddr) {
                dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
                if (IS_ERR(dh_blob))
                        return PTR_ERR(dh_blob);

                start.dh_cert_address = __sme_set(__pa(dh_blob));
                start.dh_cert_len = params.dh_len;
        }

        session_blob = NULL;
        if (params.session_uaddr) {
                session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
                if (IS_ERR(session_blob)) {
                        ret = PTR_ERR(session_blob);
                        goto e_free_dh;
                }

                start.session_address = __sme_set(__pa(session_blob));
                start.session_len = params.session_len;
        }

        start.handle = params.handle;
        start.policy = params.policy;

        /* create memory encryption context */
        ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
        if (ret)
                goto e_free_session;

        /* Bind ASID to this guest */
        ret = sev_bind_asid(kvm, start.handle, error);
        if (ret)
                goto e_free_session;

        /* return handle to userspace */
        params.handle = start.handle;
        if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
                sev_unbind_asid(kvm, start.handle);
                ret = -EFAULT;
                goto e_free_session;
        }

        sev->handle = start.handle;
        sev->fd = argp->sev_fd;

e_free_session:
        kfree(session_blob);
e_free_dh:
        kfree(dh_blob);
        return ret;
}
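
/*
 * Taken together with the handlers below, the expected launch sequence is:
 * KVM_SEV_INIT -> KVM_SEV_LAUNCH_START -> KVM_SEV_LAUNCH_UPDATE_DATA (per
 * memory region, plus KVM_SEV_LAUNCH_UPDATE_VMSA for SEV-ES vCPUs) ->
 * KVM_SEV_LAUNCH_MEASURE -> KVM_SEV_LAUNCH_FINISH, after which the guest
 * can be run.
 */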

static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
                                    unsigned long ulen, unsigned long *n,
                                    int write)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        unsigned long npages, size;
        int npinned;
        unsigned long locked, lock_limit;
        struct page **pages;
        unsigned long first, last;
        int ret;

        lockdep_assert_held(&kvm->lock);

        if (ulen == 0 || uaddr + ulen < uaddr)
                return ERR_PTR(-EINVAL);

        /* Calculate number of pages. */
        first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
        last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
        npages = (last - first + 1);

        locked = sev->pages_locked + npages;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
                pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
                return ERR_PTR(-ENOMEM);
        }

        if (WARN_ON_ONCE(npages > INT_MAX))
                return ERR_PTR(-EINVAL);

        /* Avoid using vmalloc for smaller buffers. */
        size = npages * sizeof(struct page *);
        if (size > PAGE_SIZE)
                pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
        else
                pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

        if (!pages)
                return ERR_PTR(-ENOMEM);

        /* Pin the user virtual address. */
        npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
        if (npinned != npages) {
                pr_err("SEV: Failure locking %lu pages.\n", npages);
                ret = -ENOMEM;
                goto err;
        }

        *n = npages;
        sev->pages_locked = locked;

        return pages;

err:
        if (npinned > 0)
                unpin_user_pages(pages, npinned);

        kvfree(pages);
        return ERR_PTR(ret);
}
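
/*
 * Page-count example for the first/last arithmetic above (4K pages): with
 * uaddr = 0x1800 and ulen = 0x1000 the range touches bytes 0x1800-0x27ff,
 * so first = 1, last = 2 and npages = 2, even though ulen is only one page.
 */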

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
                             unsigned long npages)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        unpin_user_pages(pages, npages);
        kvfree(pages);
        sev->pages_locked -= npages;
}

static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
        uint8_t *page_virtual;
        unsigned long i;

        if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
            pages == NULL)
                return;

        for (i = 0; i < npages; i++) {
                page_virtual = kmap_atomic(pages[i]);
                clflush_cache_range(page_virtual, PAGE_SIZE);
                kunmap_atomic(page_virtual);
        }
}

static unsigned long get_num_contig_pages(unsigned long idx,
                                          struct page **inpages, unsigned long npages)
{
        unsigned long paddr, next_paddr;
        unsigned long i = idx + 1, pages = 1;

        /* find the number of contiguous pages starting from idx */
        paddr = __sme_page_pa(inpages[idx]);
        while (i < npages) {
                next_paddr = __sme_page_pa(inpages[i++]);
                if ((paddr + PAGE_SIZE) == next_paddr) {
                        pages++;
                        paddr = next_paddr;
                        continue;
                }
                break;
        }

        return pages;
}
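
/*
 * For example, if the pinned pages sit at physical addresses 0x1000, 0x2000,
 * 0x3000 and 0x8000, get_num_contig_pages(0, ...) returns 3: the run of
 * contiguity breaks at the 0x3000 -> 0x8000 gap.
 */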

static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct kvm_sev_launch_update_data params;
        struct sev_data_launch_update_data data;
        struct page **inpages;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
                return -EFAULT;

        vaddr = params.uaddr;
        size = params.len;
        vaddr_end = vaddr + size;

        /* Lock the user memory. */
        inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
        if (IS_ERR(inpages))
                return PTR_ERR(inpages);

        /*
         * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
         * place; the cache may contain the data that was written unencrypted.
         */
        sev_clflush_pages(inpages, npages);

        data.reserved = 0;
        data.handle = sev->handle;

        for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
                int offset, len;

                /*
                 * If the user buffer is not page-aligned, calculate the offset
                 * within the page.
                 */
                offset = vaddr & (PAGE_SIZE - 1);

                /* Calculate the number of pages that can be encrypted in one go. */
                pages = get_num_contig_pages(i, inpages, npages);

                len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

                data.len = len;
                data.address = __sme_page_pa(inpages[i]) + offset;
                ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
                if (ret)
                        goto e_unpin;

                size -= len;
                next_vaddr = vaddr + len;
        }

e_unpin:
        /* content of memory is updated, mark pages dirty */
        for (i = 0; i < npages; i++) {
                set_page_dirty_lock(inpages[i]);
                mark_page_accessed(inpages[i]);
        }
        /* unlock the user pages */
        sev_unpin_memory(kvm, inpages, npages);
        return ret;
}

static int sev_es_sync_vmsa(struct vcpu_svm *svm)
{
        struct vmcb_save_area *save = &svm->vmcb->save;

        /* Check some debug related fields before encrypting the VMSA */
        if (svm->vcpu.guest_debug || (save->dr7 & ~DR7_FIXED_1))
                return -EINVAL;

        /* Sync registers */
        save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
        save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
        save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
        save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
        save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
        save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
        save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
#ifdef CONFIG_X86_64
        save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8];
        save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9];
        save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
        save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
        save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
        save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
        save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
        save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
#endif
        save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];

        /* Sync some non-GPR registers before encrypting */
        save->xcr0 = svm->vcpu.arch.xcr0;
        save->pkru = svm->vcpu.arch.pkru;
        save->xss = svm->vcpu.arch.ia32_xss;

        /*
         * SEV-ES will use a VMSA that is pointed to by the VMCB, not
         * the traditional VMSA that is part of the VMCB. Copy the
         * traditional VMSA as it has been built so far (in prep
         * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
         */
        memcpy(svm->vmsa, save, sizeof(*save));

        return 0;
}

static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_update_vmsa vmsa;
        struct kvm_vcpu *vcpu;
        int i, ret;

        if (!sev_es_guest(kvm))
                return -ENOTTY;

        vmsa.reserved = 0;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct vcpu_svm *svm = to_svm(vcpu);

                /* Perform some pre-encryption checks against the VMSA */
                ret = sev_es_sync_vmsa(svm);
                if (ret)
                        return ret;

                /*
                 * The LAUNCH_UPDATE_VMSA command will perform in-place
                 * encryption of the VMSA memory content (i.e. it will write
                 * the same memory region with the guest's key), so invalidate
                 * it first.
                 */
                clflush_cache_range(svm->vmsa, PAGE_SIZE);

                vmsa.handle = sev->handle;
                vmsa.address = __sme_pa(svm->vmsa);
                vmsa.len = PAGE_SIZE;
                ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
                                    &argp->error);
                if (ret)
                        return ret;

                svm->vcpu.arch.guest_state_protected = true;
        }

        return 0;
}

static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        void __user *measure = (void __user *)(uintptr_t)argp->data;
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_measure data;
        struct kvm_sev_launch_measure params;
        void __user *p = NULL;
        void *blob = NULL;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, measure, sizeof(params)))
                return -EFAULT;

        memset(&data, 0, sizeof(data));

        /* User wants to query the blob length */
        if (!params.len)
                goto cmd;

        p = (void __user *)(uintptr_t)params.uaddr;
        if (p) {
                if (params.len > SEV_FW_BLOB_MAX_SIZE)
                        return -EINVAL;

                blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
                if (!blob)
                        return -ENOMEM;

                data.address = __psp_pa(blob);
                data.len = params.len;
        }

cmd:
        data.handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);

        /*
         * If we queried the blob length, the FW responded with the expected
         * data.
         */
        if (!params.len)
                goto done;

        if (ret)
                goto e_free_blob;

        if (blob) {
                if (copy_to_user(p, blob, params.len))
                        ret = -EFAULT;
        }

done:
        params.len = data.len;
        if (copy_to_user(measure, &params, sizeof(params)))
                ret = -EFAULT;
e_free_blob:
        kfree(blob);
        return ret;
}
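
/*
 * Like several commands below, this implements a two-call pattern: userspace
 * first invokes the command with params.len == 0 so the firmware reports the
 * required blob size, then calls again with an allocated buffer.  A sketch
 * (error handling elided):
 *
 *      struct kvm_sev_launch_measure m = { 0 };
 *      struct kvm_sev_cmd cmd = { .id = KVM_SEV_LAUNCH_MEASURE,
 *                                 .data = (__u64)&m, .sev_fd = sev_fd };
 *      ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);      // m.len now set
 *      m.uaddr = (__u64)malloc(m.len);
 *      ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);      // fetch measurement
 */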

static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_finish data;

        if (!sev_guest(kvm))
                return -ENOTTY;

        data.handle = sev->handle;
        return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
}

static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct kvm_sev_guest_status params;
        struct sev_data_guest_status data;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        memset(&data, 0, sizeof(data));

        data.handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
        if (ret)
                return ret;

        params.policy = data.policy;
        params.state = data.state;
        params.handle = data.handle;

        if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
                ret = -EFAULT;

        return ret;
}

static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
                               unsigned long dst, int size,
                               int *error, bool enc)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_dbg data;

        data.reserved = 0;
        data.handle = sev->handle;
        data.dst_addr = dst;
        data.src_addr = src;
        data.len = size;

        return sev_issue_cmd(kvm,
                             enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
                             &data, error);
}

static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
                             unsigned long dst_paddr, int sz, int *err)
{
        int offset;

        /*
         * It's safe to read more than we were asked for; the caller should
         * ensure that the destination has enough space.
         */
        offset = src_paddr & 15;
        src_paddr = round_down(src_paddr, 16);
        sz = round_up(sz + offset, 16);

        return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}
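
/*
 * Rounding example for the 16-byte alignment above: for src_paddr = 0x100a
 * and sz = 20, offset is 10, the source is rounded down to 0x1000 and the
 * length rounded up to 32, so the firmware copy covers the requested bytes
 * plus a little slack on either side.
 */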

static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
                                  unsigned long __user dst_uaddr,
                                  unsigned long dst_paddr,
                                  int size, int *err)
{
        struct page *tpage = NULL;
        int ret, offset;

        /* if inputs are not 16-byte aligned then use an intermediate buffer */
        if (!IS_ALIGNED(dst_paddr, 16) ||
            !IS_ALIGNED(paddr, 16) ||
            !IS_ALIGNED(size, 16)) {
                tpage = (void *)alloc_page(GFP_KERNEL);
                if (!tpage)
                        return -ENOMEM;

                dst_paddr = __sme_page_pa(tpage);
        }

        ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
        if (ret)
                goto e_free;

        if (tpage) {
                offset = paddr & 15;
                if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
                                 page_address(tpage) + offset, size))
                        ret = -EFAULT;
        }

e_free:
        if (tpage)
                __free_page(tpage);

        return ret;
}

static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
                                  unsigned long __user vaddr,
                                  unsigned long dst_paddr,
                                  unsigned long __user dst_vaddr,
                                  int size, int *error)
{
        struct page *src_tpage = NULL;
        struct page *dst_tpage = NULL;
        int ret, len = size;

        /* If source buffer is not aligned then use an intermediate buffer */
        if (!IS_ALIGNED(vaddr, 16)) {
                src_tpage = alloc_page(GFP_KERNEL);
                if (!src_tpage)
                        return -ENOMEM;

                if (copy_from_user(page_address(src_tpage),
                                   (void __user *)(uintptr_t)vaddr, size)) {
                        __free_page(src_tpage);
                        return -EFAULT;
                }

                paddr = __sme_page_pa(src_tpage);
        }

        /*
         * If destination buffer or length is not aligned then do read-modify-write:
         * - decrypt destination in an intermediate buffer
         * - copy the source buffer in an intermediate buffer
         * - use the intermediate buffer as source buffer
         */
        if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
                int dst_offset;

                dst_tpage = alloc_page(GFP_KERNEL);
                if (!dst_tpage) {
                        ret = -ENOMEM;
                        goto e_free;
                }

                ret = __sev_dbg_decrypt(kvm, dst_paddr,
                                        __sme_page_pa(dst_tpage), size, error);
                if (ret)
                        goto e_free;

                /*
                 * If source is kernel buffer then use memcpy() otherwise
                 * copy_from_user().
                 */
                dst_offset = dst_paddr & 15;

                if (src_tpage)
                        memcpy(page_address(dst_tpage) + dst_offset,
                               page_address(src_tpage), size);
                else {
                        if (copy_from_user(page_address(dst_tpage) + dst_offset,
                                           (void __user *)(uintptr_t)vaddr, size)) {
                                ret = -EFAULT;
                                goto e_free;
                        }
                }

                paddr = __sme_page_pa(dst_tpage);
                dst_paddr = round_down(dst_paddr, 16);
                len = round_up(size, 16);
        }

        ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
        if (src_tpage)
                __free_page(src_tpage);
        if (dst_tpage)
                __free_page(dst_tpage);
        return ret;
}

static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
        unsigned long vaddr, vaddr_end, next_vaddr;
        unsigned long dst_vaddr;
        struct page **src_p, **dst_p;
        struct kvm_sev_dbg debug;
        unsigned long n;
        unsigned int size;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
                return -EFAULT;

        if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
                return -EINVAL;
        if (!debug.dst_uaddr)
                return -EINVAL;

        vaddr = debug.src_uaddr;
        size = debug.len;
        vaddr_end = vaddr + size;
        dst_vaddr = debug.dst_uaddr;

        for (; vaddr < vaddr_end; vaddr = next_vaddr) {
                int len, s_off, d_off;

                /* lock userspace source and destination page */
                src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
                if (IS_ERR(src_p))
                        return PTR_ERR(src_p);

                dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
                if (IS_ERR(dst_p)) {
                        sev_unpin_memory(kvm, src_p, n);
                        return PTR_ERR(dst_p);
                }

                /*
                 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
                 * the pages; flush the destination too so that future accesses do not
                 * see stale data.
                 */
                sev_clflush_pages(src_p, 1);
                sev_clflush_pages(dst_p, 1);

                /*
                 * Since the user buffer may not be page-aligned, calculate the
                 * offset within the page.
                 */
                s_off = vaddr & ~PAGE_MASK;
                d_off = dst_vaddr & ~PAGE_MASK;
                len = min_t(size_t, (PAGE_SIZE - s_off), size);

                if (dec)
                        ret = __sev_dbg_decrypt_user(kvm,
                                                     __sme_page_pa(src_p[0]) + s_off,
                                                     dst_vaddr,
                                                     __sme_page_pa(dst_p[0]) + d_off,
                                                     len, &argp->error);
                else
                        ret = __sev_dbg_encrypt_user(kvm,
                                                     __sme_page_pa(src_p[0]) + s_off,
                                                     vaddr,
                                                     __sme_page_pa(dst_p[0]) + d_off,
                                                     dst_vaddr,
                                                     len, &argp->error);

                sev_unpin_memory(kvm, src_p, n);
                sev_unpin_memory(kvm, dst_p, n);

                if (ret)
                        goto err;

                next_vaddr = vaddr + len;
                dst_vaddr = dst_vaddr + len;
                size -= len;
        }
err:
        return ret;
}
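
/*
 * A minimal userspace sketch (assuming the guest's SEV policy permits
 * debugging; struct kvm_sev_dbg is the <linux/kvm.h> uapi type, error
 * handling elided):
 *
 *      struct kvm_sev_dbg dbg = {
 *              .src_uaddr = (__u64)guest_buf,
 *              .dst_uaddr = (__u64)clear_buf,
 *              .len       = 4096,
 *      };
 *      struct kvm_sev_cmd cmd = { .id = KVM_SEV_DBG_DECRYPT,
 *                                 .data = (__u64)&dbg, .sev_fd = sev_fd };
 *      ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 */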

static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_secret data;
        struct kvm_sev_launch_secret params;
        struct page **pages;
        void *blob, *hdr;
        unsigned long n, i;
        int ret, offset;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
                return -EFAULT;

        pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /*
         * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
         * place; the cache may contain the data that was written unencrypted.
         */
        sev_clflush_pages(pages, n);

        /*
         * The secret must be copied into a contiguous memory region, so verify
         * that the userspace memory pages are contiguous before issuing the
         * command.
         */
        if (get_num_contig_pages(0, pages, n) != n) {
                ret = -EINVAL;
                goto e_unpin_memory;
        }

        memset(&data, 0, sizeof(data));

        offset = params.guest_uaddr & (PAGE_SIZE - 1);
        data.guest_address = __sme_page_pa(pages[0]) + offset;
        data.guest_len = params.guest_len;

        blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
        if (IS_ERR(blob)) {
                ret = PTR_ERR(blob);
                goto e_unpin_memory;
        }

        data.trans_address = __psp_pa(blob);
        data.trans_len = params.trans_len;

        hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
        if (IS_ERR(hdr)) {
                ret = PTR_ERR(hdr);
                goto e_free_blob;
        }
        data.hdr_address = __psp_pa(hdr);
        data.hdr_len = params.hdr_len;

        data.handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);

        kfree(hdr);

e_free_blob:
        kfree(blob);
e_unpin_memory:
        /* content of memory is updated, mark pages dirty */
        for (i = 0; i < n; i++) {
                set_page_dirty_lock(pages[i]);
                mark_page_accessed(pages[i]);
        }
        sev_unpin_memory(kvm, pages, n);
        return ret;
}

static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        void __user *report = (void __user *)(uintptr_t)argp->data;
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_attestation_report data;
        struct kvm_sev_attestation_report params;
        void __user *p;
        void *blob = NULL;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
                return -EFAULT;

        memset(&data, 0, sizeof(data));

        /* User wants to query the blob length */
        if (!params.len)
                goto cmd;

        p = (void __user *)(uintptr_t)params.uaddr;
        if (p) {
                if (params.len > SEV_FW_BLOB_MAX_SIZE)
                        return -EINVAL;

                blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
                if (!blob)
                        return -ENOMEM;

                data.address = __psp_pa(blob);
                data.len = params.len;
                memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
        }
cmd:
        data.handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
        /*
         * If we queried the blob length, the FW responded with the expected
         * data.
         */
        if (!params.len)
                goto done;

        if (ret)
                goto e_free_blob;

        if (blob) {
                if (copy_to_user(p, blob, params.len))
                        ret = -EFAULT;
        }

done:
        params.len = data.len;
        if (copy_to_user(report, &params, sizeof(params)))
                ret = -EFAULT;
e_free_blob:
        kfree(blob);
        return ret;
}

/* Userspace wants to query session length. */
static int
__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
                                      struct kvm_sev_send_start *params)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_send_start data;
        int ret;

        data.handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
        if (ret < 0)
                return ret;

        params->session_len = data.session_len;
        if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
                         sizeof(struct kvm_sev_send_start)))
                ret = -EFAULT;

        return ret;
}

static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_send_start data;
        struct kvm_sev_send_start params;
        void *amd_certs, *session_data;
        void *pdh_cert, *plat_certs;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
                           sizeof(struct kvm_sev_send_start)))
                return -EFAULT;

        /* if session_len is zero, userspace wants to query the session length */
        if (!params.session_len)
                return __sev_send_start_query_session_length(kvm, argp,
                                &params);

        /* some sanity checks */
        if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
            !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
                return -EINVAL;

        /* allocate the memory to hold the session data blob */
        session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
        if (!session_data)
                return -ENOMEM;

        /* copy the certificate blobs from userspace */
        pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
                                      params.pdh_cert_len);
        if (IS_ERR(pdh_cert)) {
                ret = PTR_ERR(pdh_cert);
                goto e_free_session;
        }

        plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
                                        params.plat_certs_len);
        if (IS_ERR(plat_certs)) {
                ret = PTR_ERR(plat_certs);
                goto e_free_pdh;
        }

        amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
                                       params.amd_certs_len);
        if (IS_ERR(amd_certs)) {
                ret = PTR_ERR(amd_certs);
                goto e_free_plat_cert;
        }

        /* populate the FW SEND_START field with system physical address */
        memset(&data, 0, sizeof(data));
        data.pdh_cert_address = __psp_pa(pdh_cert);
        data.pdh_cert_len = params.pdh_cert_len;
        data.plat_certs_address = __psp_pa(plat_certs);
        data.plat_certs_len = params.plat_certs_len;
        data.amd_certs_address = __psp_pa(amd_certs);
        data.amd_certs_len = params.amd_certs_len;
        data.session_address = __psp_pa(session_data);
        data.session_len = params.session_len;
        data.handle = sev->handle;

        ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

        if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
                        session_data, params.session_len)) {
                ret = -EFAULT;
                goto e_free_amd_cert;
        }

        params.policy = data.policy;
        params.session_len = data.session_len;
        if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
                         sizeof(struct kvm_sev_send_start)))
                ret = -EFAULT;

e_free_amd_cert:
        kfree(amd_certs);
e_free_plat_cert:
        kfree(plat_certs);
e_free_pdh:
        kfree(pdh_cert);
e_free_session:
        kfree(session_data);
        return ret;
}
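
/*
 * The SEND_* and RECEIVE_* handlers pair up for live migration of an
 * encrypted guest: the source side runs SEND_START -> SEND_UPDATE_DATA (per
 * chunk) -> SEND_FINISH (or SEND_CANCEL to abort), while the target side
 * runs RECEIVE_START -> RECEIVE_UPDATE_DATA -> RECEIVE_FINISH to rebuild
 * the guest under its own memory encryption context.
 */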

/* Userspace wants to query either header or trans length. */
static int
__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
                                     struct kvm_sev_send_update_data *params)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_send_update_data data;
        int ret;

        data.handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
        if (ret < 0)
                return ret;

        params->hdr_len = data.hdr_len;
        params->trans_len = data.trans_len;

        if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
                         sizeof(struct kvm_sev_send_update_data)))
                ret = -EFAULT;

        return ret;
}

static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_send_update_data data;
        struct kvm_sev_send_update_data params;
        void *hdr, *trans_data;
        struct page **guest_page;
        unsigned long n;
        int ret, offset;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
                           sizeof(struct kvm_sev_send_update_data)))
                return -EFAULT;

        /* userspace wants to query either header or trans length */
        if (!params.trans_len || !params.hdr_len)
                return __sev_send_update_data_query_lengths(kvm, argp, &params);

        if (!params.trans_uaddr || !params.guest_uaddr ||
            !params.guest_len || !params.hdr_uaddr)
                return -EINVAL;

        /* Check if we are crossing the page boundary */
        offset = params.guest_uaddr & (PAGE_SIZE - 1);
        if ((params.guest_len + offset > PAGE_SIZE))
                return -EINVAL;

        /* Pin guest memory */
        guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
                                    PAGE_SIZE, &n, 0);
        if (!guest_page)
                return -EFAULT;

        /* allocate memory for header and transport buffer */
        ret = -ENOMEM;
        hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
        if (!hdr)
                goto e_unpin;

        trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
        if (!trans_data)
                goto e_free_hdr;

        memset(&data, 0, sizeof(data));
        data.hdr_address = __psp_pa(hdr);
        data.hdr_len = params.hdr_len;
        data.trans_address = __psp_pa(trans_data);
        data.trans_len = params.trans_len;

        /* The SEND_UPDATE_DATA command requires C-bit to be always set. */
        data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
        data.guest_address |= sev_me_mask;
        data.guest_len = params.guest_len;
        data.handle = sev->handle;

        ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);

        if (ret)
                goto e_free_trans_data;

        /* copy transport buffer to user space */
        if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
                         trans_data, params.trans_len)) {
                ret = -EFAULT;
                goto e_free_trans_data;
        }

        /* Copy packet header to userspace. */
        ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
                           params.hdr_len);

e_free_trans_data:
        kfree(trans_data);
e_free_hdr:
        kfree(hdr);
e_unpin:
        sev_unpin_memory(kvm, guest_page, n);

        return ret;
}
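
/*
 * C-bit example: assuming a C-bit in physical address bit 47 (so
 * sev_me_mask = 1ULL << 47, the actual position is CPUID-reported), a guest
 * page at PFN 0x12345 with offset 0x100 yields guest_address =
 * 0x12345100 | (1ULL << 47) = 0x800012345100, telling the firmware to treat
 * the source as encrypted memory.
 */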

static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_send_finish data;

        if (!sev_guest(kvm))
                return -ENOTTY;

        data.handle = sev->handle;
        return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
}

static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_send_cancel data;

        if (!sev_guest(kvm))
                return -ENOTTY;

        data.handle = sev->handle;
        return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
}

static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_receive_start start;
        struct kvm_sev_receive_start params;
        int *error = &argp->error;
        void *session_data;
        void *pdh_data;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        /* Get parameter from the userspace */
        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
                           sizeof(struct kvm_sev_receive_start)))
                return -EFAULT;

        /* some sanity checks */
        if (!params.pdh_uaddr || !params.pdh_len ||
            !params.session_uaddr || !params.session_len)
                return -EINVAL;

        pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
        if (IS_ERR(pdh_data))
                return PTR_ERR(pdh_data);

        session_data = psp_copy_user_blob(params.session_uaddr,
                                          params.session_len);
        if (IS_ERR(session_data)) {
                ret = PTR_ERR(session_data);
                goto e_free_pdh;
        }

        memset(&start, 0, sizeof(start));
        start.handle = params.handle;
        start.policy = params.policy;
        start.pdh_cert_address = __psp_pa(pdh_data);
        start.pdh_cert_len = params.pdh_len;
        start.session_address = __psp_pa(session_data);
        start.session_len = params.session_len;

        /* create memory encryption context */
        ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
                              error);
        if (ret)
                goto e_free_session;

        /* Bind ASID to this guest */
        ret = sev_bind_asid(kvm, start.handle, error);
        if (ret)
                goto e_free_session;

        params.handle = start.handle;
        if (copy_to_user((void __user *)(uintptr_t)argp->data,
                         &params, sizeof(struct kvm_sev_receive_start))) {
                ret = -EFAULT;
                sev_unbind_asid(kvm, start.handle);
                goto e_free_session;
        }

        sev->handle = start.handle;
        sev->fd = argp->sev_fd;

e_free_session:
        kfree(session_data);
e_free_pdh:
        kfree(pdh_data);

        return ret;
}

static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct kvm_sev_receive_update_data params;
        struct sev_data_receive_update_data data;
        void *hdr = NULL, *trans = NULL;
        struct page **guest_page;
        unsigned long n;
        int ret, offset;

        if (!sev_guest(kvm))
                return -EINVAL;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
                           sizeof(struct kvm_sev_receive_update_data)))
                return -EFAULT;

        if (!params.hdr_uaddr || !params.hdr_len ||
            !params.guest_uaddr || !params.guest_len ||
            !params.trans_uaddr || !params.trans_len)
                return -EINVAL;

        /* Check if we are crossing the page boundary */
        offset = params.guest_uaddr & (PAGE_SIZE - 1);
        if ((params.guest_len + offset > PAGE_SIZE))
                return -EINVAL;

        hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
        if (IS_ERR(hdr))
                return PTR_ERR(hdr);

        trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto e_free_hdr;
        }

        memset(&data, 0, sizeof(data));
        data.hdr_address = __psp_pa(hdr);
        data.hdr_len = params.hdr_len;
        data.trans_address = __psp_pa(trans);
        data.trans_len = params.trans_len;

        /* Pin guest memory */
        ret = -EFAULT;
        guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
                                    PAGE_SIZE, &n, 0);
        if (!guest_page)
                goto e_free_trans;

        /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
        data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
        data.guest_address |= sev_me_mask;
        data.guest_len = params.guest_len;
        data.handle = sev->handle;

        ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
                            &argp->error);

        sev_unpin_memory(kvm, guest_page, n);

e_free_trans:
        kfree(trans);
e_free_hdr:
        kfree(hdr);

        return ret;
}
1485
Brijesh Singh6a443de2021-04-15 15:55:40 +00001486static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1487{
1488 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -07001489 struct sev_data_receive_finish data;
Brijesh Singh6a443de2021-04-15 15:55:40 +00001490
1491 if (!sev_guest(kvm))
1492 return -ENOTTY;
1493
Sean Christopherson238eca82021-04-06 15:49:52 -07001494 data.handle = sev->handle;
1495 return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
Brijesh Singh6a443de2021-04-15 15:55:40 +00001496}
1497
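/*
 * Top-level dispatcher for the KVM_MEMORY_ENCRYPT_OP ioctl: copy the
 * command from userspace, serialize against other VM-scoped operations
 * via kvm->lock, and route to the per-command handler. Mirror VMs defer
 * all memory encryption operations to their enc_context_owner.
 */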
Joerg Roedeleaf78262020-03-24 10:41:54 +01001498int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
1499{
1500 struct kvm_sev_cmd sev_cmd;
1501 int r;
1502
Sean Christopherson8d364a02021-04-21 19:11:17 -07001503 if (!svm_sev_enabled() || !sev_enabled)
Joerg Roedeleaf78262020-03-24 10:41:54 +01001504 return -ENOTTY;
1505
1506 if (!argp)
1507 return 0;
1508
1509 if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
1510 return -EFAULT;
1511
1512 mutex_lock(&kvm->lock);
1513
Nathan Tempelman54526d12021-04-08 22:32:14 +00001514 /* enc_context_owner handles all memory enc operations */
1515 if (is_mirroring_enc_context(kvm)) {
1516 r = -EINVAL;
1517 goto out;
1518 }
1519
Joerg Roedeleaf78262020-03-24 10:41:54 +01001520 switch (sev_cmd.id) {
Sean Christopherson9fa15212021-03-30 20:19:35 -07001521 case KVM_SEV_ES_INIT:
Sean Christopherson8d364a02021-04-21 19:11:17 -07001522 if (!sev_es_enabled) {
Sean Christopherson9fa15212021-03-30 20:19:35 -07001523 r = -ENOTTY;
1524 goto out;
1525 }
1526 fallthrough;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001527 case KVM_SEV_INIT:
1528 r = sev_guest_init(kvm, &sev_cmd);
1529 break;
1530 case KVM_SEV_LAUNCH_START:
1531 r = sev_launch_start(kvm, &sev_cmd);
1532 break;
1533 case KVM_SEV_LAUNCH_UPDATE_DATA:
1534 r = sev_launch_update_data(kvm, &sev_cmd);
1535 break;
Tom Lendackyad731092020-12-10 11:10:09 -06001536 case KVM_SEV_LAUNCH_UPDATE_VMSA:
1537 r = sev_launch_update_vmsa(kvm, &sev_cmd);
1538 break;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001539 case KVM_SEV_LAUNCH_MEASURE:
1540 r = sev_launch_measure(kvm, &sev_cmd);
1541 break;
1542 case KVM_SEV_LAUNCH_FINISH:
1543 r = sev_launch_finish(kvm, &sev_cmd);
1544 break;
1545 case KVM_SEV_GUEST_STATUS:
1546 r = sev_guest_status(kvm, &sev_cmd);
1547 break;
1548 case KVM_SEV_DBG_DECRYPT:
1549 r = sev_dbg_crypt(kvm, &sev_cmd, true);
1550 break;
1551 case KVM_SEV_DBG_ENCRYPT:
1552 r = sev_dbg_crypt(kvm, &sev_cmd, false);
1553 break;
1554 case KVM_SEV_LAUNCH_SECRET:
1555 r = sev_launch_secret(kvm, &sev_cmd);
1556 break;
Brijesh Singh2c07ded2021-01-04 09:17:49 -06001557 case KVM_SEV_GET_ATTESTATION_REPORT:
1558 r = sev_get_attestation_report(kvm, &sev_cmd);
1559 break;
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001560 case KVM_SEV_SEND_START:
1561 r = sev_send_start(kvm, &sev_cmd);
1562 break;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001563 case KVM_SEV_SEND_UPDATE_DATA:
1564 r = sev_send_update_data(kvm, &sev_cmd);
1565 break;
Brijesh Singhfddecf62021-04-15 15:54:15 +00001566 case KVM_SEV_SEND_FINISH:
1567 r = sev_send_finish(kvm, &sev_cmd);
1568 break;
Steve Rutherford5569e2e2021-04-20 05:01:20 -04001569 case KVM_SEV_SEND_CANCEL:
1570 r = sev_send_cancel(kvm, &sev_cmd);
1571 break;
Brijesh Singhaf43cbb2021-04-15 15:54:50 +00001572 case KVM_SEV_RECEIVE_START:
1573 r = sev_receive_start(kvm, &sev_cmd);
1574 break;
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001575 case KVM_SEV_RECEIVE_UPDATE_DATA:
1576 r = sev_receive_update_data(kvm, &sev_cmd);
1577 break;
Brijesh Singh6a443de2021-04-15 15:55:40 +00001578 case KVM_SEV_RECEIVE_FINISH:
1579 r = sev_receive_finish(kvm, &sev_cmd);
1580 break;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001581 default:
1582 r = -EINVAL;
1583 goto out;
1584 }
1585
1586 if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
1587 r = -EFAULT;
1588
1589out:
1590 mutex_unlock(&kvm->lock);
1591 return r;
1592}
1593
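/*
 * Register a userspace memory region with the SEV guest: pin the pages
 * so the PSP can operate on them by physical address, and track the
 * region on sev->regions_list so it can be unpinned at teardown.
 */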
1594int svm_register_enc_region(struct kvm *kvm,
1595 struct kvm_enc_region *range)
1596{
1597 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1598 struct enc_region *region;
1599 int ret = 0;
1600
1601 if (!sev_guest(kvm))
1602 return -ENOTTY;
1603
Nathan Tempelman54526d12021-04-08 22:32:14 +00001604	/* If kvm is mirroring an encryption context, it isn't responsible for it */
1605 if (is_mirroring_enc_context(kvm))
1606 return -EINVAL;
1607
Joerg Roedeleaf78262020-03-24 10:41:54 +01001608 if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
1609 return -EINVAL;
1610
1611 region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
1612 if (!region)
1613 return -ENOMEM;
1614
Peter Gonda19a23da2021-01-27 08:15:24 -08001615 mutex_lock(&kvm->lock);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001616 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
Paolo Bonzinia8d908b2020-06-23 05:12:24 -04001617 if (IS_ERR(region->pages)) {
1618 ret = PTR_ERR(region->pages);
Peter Gonda19a23da2021-01-27 08:15:24 -08001619 mutex_unlock(&kvm->lock);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001620 goto e_free;
1621 }
1622
Peter Gonda19a23da2021-01-27 08:15:24 -08001623 region->uaddr = range->addr;
1624 region->size = range->size;
1625
1626 list_add_tail(&region->list, &sev->regions_list);
1627 mutex_unlock(&kvm->lock);
1628
Joerg Roedeleaf78262020-03-24 10:41:54 +01001629 /*
1630 * The guest may change the memory encryption attribute from C=0 -> C=1
1631	 * or vice versa for this memory range. Let's make sure caches are
1632 * flushed to ensure that guest data gets written into memory with
1633 * correct C-bit.
1634 */
1635 sev_clflush_pages(region->pages, region->npages);
1636
Joerg Roedeleaf78262020-03-24 10:41:54 +01001637 return ret;
1638
1639e_free:
1640 kfree(region);
1641 return ret;
1642}
1643
1644static struct enc_region *
1645find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
1646{
1647 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1648 struct list_head *head = &sev->regions_list;
1649 struct enc_region *i;
1650
1651 list_for_each_entry(i, head, list) {
1652 if (i->uaddr == range->addr &&
1653 i->size == range->size)
1654 return i;
1655 }
1656
1657 return NULL;
1658}
1659
1660static void __unregister_enc_region_locked(struct kvm *kvm,
1661 struct enc_region *region)
1662{
1663 sev_unpin_memory(kvm, region->pages, region->npages);
1664 list_del(&region->list);
1665 kfree(region);
1666}
1667
1668int svm_unregister_enc_region(struct kvm *kvm,
1669 struct kvm_enc_region *range)
1670{
1671 struct enc_region *region;
1672 int ret;
1673
Nathan Tempelman54526d12021-04-08 22:32:14 +00001674	/* If kvm is mirroring an encryption context, it isn't responsible for it */
1675 if (is_mirroring_enc_context(kvm))
1676 return -EINVAL;
1677
Joerg Roedeleaf78262020-03-24 10:41:54 +01001678 mutex_lock(&kvm->lock);
1679
1680 if (!sev_guest(kvm)) {
1681 ret = -ENOTTY;
1682 goto failed;
1683 }
1684
1685 region = find_enc_region(kvm, range);
1686 if (!region) {
1687 ret = -EINVAL;
1688 goto failed;
1689 }
1690
1691 /*
1692 * Ensure that all guest tagged cache entries are flushed before
1693 * releasing the pages back to the system for use. CLFLUSH will
1694 * not do this, so issue a WBINVD.
1695 */
1696 wbinvd_on_all_cpus();
1697
1698 __unregister_enc_region_locked(kvm, region);
1699
1700 mutex_unlock(&kvm->lock);
1701 return 0;
1702
1703failed:
1704 mutex_unlock(&kvm->lock);
1705 return ret;
1706}
1707
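/*
 * Create a "mirror" VM that shares the source VM's SEV ASID, and thus
 * its encryption context, without owning it. A reference on the source
 * VM is taken so the ASID cannot be freed while the mirror lives.
 */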
Nathan Tempelman54526d12021-04-08 22:32:14 +00001708int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
1709{
1710 struct file *source_kvm_file;
1711 struct kvm *source_kvm;
1712 struct kvm_sev_info *mirror_sev;
1713 unsigned int asid;
1714 int ret;
1715
1716 source_kvm_file = fget(source_fd);
1717 if (!file_is_kvm(source_kvm_file)) {
1718 ret = -EBADF;
1719 goto e_source_put;
1720 }
1721
1722 source_kvm = source_kvm_file->private_data;
1723 mutex_lock(&source_kvm->lock);
1724
1725 if (!sev_guest(source_kvm)) {
1726 ret = -EINVAL;
1727 goto e_source_unlock;
1728 }
1729
1730 /* Mirrors of mirrors should work, but let's not get silly */
1731 if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
1732 ret = -EINVAL;
1733 goto e_source_unlock;
1734 }
1735
1736 asid = to_kvm_svm(source_kvm)->sev_info.asid;
1737
1738 /*
1739 * The mirror kvm holds an enc_context_owner ref so its asid can't
1740 * disappear until we're done with it
1741 */
1742 kvm_get_kvm(source_kvm);
1743
1744 fput(source_kvm_file);
1745 mutex_unlock(&source_kvm->lock);
1746 mutex_lock(&kvm->lock);
1747
1748 if (sev_guest(kvm)) {
1749 ret = -EINVAL;
1750 goto e_mirror_unlock;
1751 }
1752
1753 /* Set enc_context_owner and copy its encryption context over */
1754 mirror_sev = &to_kvm_svm(kvm)->sev_info;
1755 mirror_sev->enc_context_owner = source_kvm;
1756 mirror_sev->asid = asid;
1757 mirror_sev->active = true;
1758
1759 mutex_unlock(&kvm->lock);
1760 return 0;
1761
1762e_mirror_unlock:
1763 mutex_unlock(&kvm->lock);
1764 kvm_put_kvm(source_kvm);
1765 return ret;
1766e_source_unlock:
1767 mutex_unlock(&source_kvm->lock);
1768e_source_put:
1769 fput(source_kvm_file);
1770 return ret;
1771}
1772
Joerg Roedeleaf78262020-03-24 10:41:54 +01001773void sev_vm_destroy(struct kvm *kvm)
1774{
1775 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1776 struct list_head *head = &sev->regions_list;
1777 struct list_head *pos, *q;
1778
1779 if (!sev_guest(kvm))
1780 return;
1781
Nathan Tempelman54526d12021-04-08 22:32:14 +00001782	/* If this is a mirror_kvm, release the enc_context_owner and skip SEV cleanup */
1783 if (is_mirroring_enc_context(kvm)) {
1784 kvm_put_kvm(sev->enc_context_owner);
1785 return;
1786 }
1787
Joerg Roedeleaf78262020-03-24 10:41:54 +01001788 mutex_lock(&kvm->lock);
1789
1790 /*
1791 * Ensure that all guest tagged cache entries are flushed before
1792 * releasing the pages back to the system for use. CLFLUSH will
1793 * not do this, so issue a WBINVD.
1794 */
1795 wbinvd_on_all_cpus();
1796
1797 /*
1798	 * If userspace was terminated before unregistering the memory regions,
1799	 * then unpin all of the registered memory.
1800 */
1801 if (!list_empty(head)) {
1802 list_for_each_safe(pos, q, head) {
1803 __unregister_enc_region_locked(kvm,
1804 list_entry(pos, struct enc_region, list));
David Rientjes7be74942020-08-25 12:56:28 -07001805 cond_resched();
Joerg Roedeleaf78262020-03-24 10:41:54 +01001806 }
1807 }
1808
1809 mutex_unlock(&kvm->lock);
1810
1811 sev_unbind_asid(kvm, sev->handle);
Vipin Sharma7aef27f2021-03-29 21:42:06 -07001812 sev_asid_free(sev);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001813}
1814
Paolo Bonzinid9db0fd2021-04-21 19:11:15 -07001815void __init sev_set_cpu_caps(void)
1816{
Sean Christopherson8d364a02021-04-21 19:11:17 -07001817 if (!sev_enabled)
Paolo Bonzinid9db0fd2021-04-21 19:11:15 -07001818 kvm_cpu_cap_clear(X86_FEATURE_SEV);
Sean Christopherson8d364a02021-04-21 19:11:17 -07001819 if (!sev_es_enabled)
Paolo Bonzinid9db0fd2021-04-21 19:11:15 -07001820 kvm_cpu_cap_clear(X86_FEATURE_SEV_ES);
1821}
1822
Tom Lendacky916391a2020-12-10 11:09:38 -06001823void __init sev_hardware_setup(void)
Joerg Roedeleaf78262020-03-24 10:41:54 +01001824{
Sean Christophersona479c332021-04-21 19:11:18 -07001825#ifdef CONFIG_KVM_AMD_SEV
Vipin Sharma7aef27f2021-03-29 21:42:06 -07001826 unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
Tom Lendacky916391a2020-12-10 11:09:38 -06001827 bool sev_es_supported = false;
1828 bool sev_supported = false;
1829
Sean Christophersona479c332021-04-21 19:11:18 -07001830 if (!sev_enabled || !npt_enabled)
Sean Christophersone8126bd2021-04-21 19:11:14 -07001831 goto out;
1832
Tom Lendacky916391a2020-12-10 11:09:38 -06001833 /* Does the CPU support SEV? */
1834 if (!boot_cpu_has(X86_FEATURE_SEV))
1835 goto out;
1836
1837 /* Retrieve SEV CPUID information */
1838 cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
1839
Tom Lendacky1edc1452020-12-10 11:09:49 -06001840 /* Set encryption bit location for SEV-ES guests */
1841 sev_enc_bit = ebx & 0x3f;
1842
Joerg Roedeleaf78262020-03-24 10:41:54 +01001843 /* Maximum number of encrypted guests supported simultaneously */
Tom Lendacky916391a2020-12-10 11:09:38 -06001844 max_sev_asid = ecx;
Sean Christopherson8cb756b2021-04-21 19:11:21 -07001845 if (!max_sev_asid)
Tom Lendacky916391a2020-12-10 11:09:38 -06001846 goto out;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001847
1848 /* Minimum ASID value that should be used for SEV guest */
Tom Lendacky916391a2020-12-10 11:09:38 -06001849 min_sev_asid = edx;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001850 sev_me_mask = 1UL << (ebx & 0x3f);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001851
1852 /* Initialize SEV ASID bitmaps */
1853 sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1854 if (!sev_asid_bitmap)
Tom Lendacky916391a2020-12-10 11:09:38 -06001855 goto out;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001856
1857 sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
Sean Christophersonf31b88b2021-04-21 19:11:12 -07001858 if (!sev_reclaim_asid_bitmap) {
1859 bitmap_free(sev_asid_bitmap);
1860 sev_asid_bitmap = NULL;
Tom Lendacky916391a2020-12-10 11:09:38 -06001861 goto out;
Sean Christophersonf31b88b2021-04-21 19:11:12 -07001862 }
Joerg Roedeleaf78262020-03-24 10:41:54 +01001863
Vipin Sharma7aef27f2021-03-29 21:42:06 -07001864 sev_asid_count = max_sev_asid - min_sev_asid + 1;
1865 if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count))
1866 goto out;
1867
1868 pr_info("SEV supported: %u ASIDs\n", sev_asid_count);
Tom Lendacky916391a2020-12-10 11:09:38 -06001869 sev_supported = true;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001870
Tom Lendacky916391a2020-12-10 11:09:38 -06001871 /* SEV-ES support requested? */
Sean Christopherson8d364a02021-04-21 19:11:17 -07001872 if (!sev_es_enabled)
Tom Lendacky916391a2020-12-10 11:09:38 -06001873 goto out;
1874
1875 /* Does the CPU support SEV-ES? */
1876 if (!boot_cpu_has(X86_FEATURE_SEV_ES))
1877 goto out;
1878
1879 /* Has the system been allocated ASIDs for SEV-ES? */
1880 if (min_sev_asid == 1)
1881 goto out;
1882
Vipin Sharma7aef27f2021-03-29 21:42:06 -07001883 sev_es_asid_count = min_sev_asid - 1;
1884 if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count))
1885 goto out;
1886
1887 pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count);
Tom Lendacky916391a2020-12-10 11:09:38 -06001888 sev_es_supported = true;
1889
1890out:
Sean Christopherson8d364a02021-04-21 19:11:17 -07001891 sev_enabled = sev_supported;
1892 sev_es_enabled = sev_es_supported;
Sean Christophersona479c332021-04-21 19:11:18 -07001893#endif
Joerg Roedeleaf78262020-03-24 10:41:54 +01001894}
1895
1896void sev_hardware_teardown(void)
1897{
Paolo Bonzini9ef15302020-04-13 03:20:06 -04001898 if (!svm_sev_enabled())
1899 return;
1900
Joerg Roedeleaf78262020-03-24 10:41:54 +01001901 bitmap_free(sev_asid_bitmap);
1902 bitmap_free(sev_reclaim_asid_bitmap);
Vipin Sharma7aef27f2021-03-29 21:42:06 -07001903 misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
1904 misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001905
1906 sev_flush_asids();
1907}
1908
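/*
 * sd->sev_vmcbs is a per-CPU array, indexed by ASID, that records the
 * last VMCB run with each ASID on this CPU; pre_sev_run() consults it
 * to decide when a guest TLB flush is required.
 */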
Sean Christophersonb95c2212021-04-21 19:11:22 -07001909int sev_cpu_init(struct svm_cpu_data *sd)
1910{
1911 if (!svm_sev_enabled())
1912 return 0;
1913
1914 sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL);
1915 if (!sd->sev_vmcbs)
1916 return -ENOMEM;
1917
1918 return 0;
1919}
1920
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06001921/*
1922 * Pages used by hardware to hold guest encrypted state must be flushed before
1923 * returning them to the system.
1924 */
1925static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
1926 unsigned long len)
1927{
1928 /*
1929 * If hardware enforced cache coherency for encrypted mappings of the
1930 * same physical page is supported, nothing to do.
1931 */
1932 if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
1933 return;
1934
1935 /*
1936 * If the VM Page Flush MSR is supported, use it to flush the page
1937 * (using the page virtual address and the guest ASID).
1938 */
1939 if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
1940 struct kvm_sev_info *sev;
1941 unsigned long va_start;
1942 u64 start, stop;
1943
1944 /* Align start and stop to page boundaries. */
1945 va_start = (unsigned long)va;
1946 start = (u64)va_start & PAGE_MASK;
1947 stop = PAGE_ALIGN((u64)va_start + len);
1948
1949 if (start < stop) {
1950 sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
1951
1952 while (start < stop) {
1953 wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
1954 start | sev->asid);
1955
1956 start += PAGE_SIZE;
1957 }
1958
1959 return;
1960 }
1961
1962 WARN(1, "Address overflow, using WBINVD\n");
1963 }
1964
1965 /*
1966 * Hardware should always have one of the above features,
1967 * but if not, use WBINVD and issue a warning.
1968 */
1969 WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
1970 wbinvd_on_all_cpus();
1971}
1972
1973void sev_free_vcpu(struct kvm_vcpu *vcpu)
1974{
1975 struct vcpu_svm *svm;
1976
1977 if (!sev_es_guest(vcpu->kvm))
1978 return;
1979
1980 svm = to_svm(vcpu);
1981
1982 if (vcpu->arch.guest_state_protected)
1983 sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
1984 __free_page(virt_to_page(svm->vmsa));
Tom Lendacky8f423a82020-12-10 11:09:53 -06001985
1986 if (svm->ghcb_sa_free)
1987 kfree(svm->ghcb_sa);
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06001988}
1989
Tom Lendacky291bd202020-12-10 11:09:47 -06001990static void dump_ghcb(struct vcpu_svm *svm)
1991{
1992 struct ghcb *ghcb = svm->ghcb;
1993 unsigned int nbits;
1994
1995 /* Re-use the dump_invalid_vmcb module parameter */
1996 if (!dump_invalid_vmcb) {
1997 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
1998 return;
1999 }
2000
2001 nbits = sizeof(ghcb->save.valid_bitmap) * 8;
2002
2003 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
2004 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
2005 ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
2006 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
2007 ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
2008 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
2009 ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
2010 pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
2011 ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
2012 pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
2013}
2014
2015static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
2016{
2017 struct kvm_vcpu *vcpu = &svm->vcpu;
2018 struct ghcb *ghcb = svm->ghcb;
2019
2020 /*
2021 * The GHCB protocol so far allows for the following data
2022 * to be returned:
2023 * GPRs RAX, RBX, RCX, RDX
2024 *
Sean Christopherson25009142021-01-22 15:50:47 -08002025 * Copy their values, even if they may not have been written during the
2026 * VM-Exit. It's the guest's responsibility to not consume random data.
Tom Lendacky291bd202020-12-10 11:09:47 -06002027 */
Sean Christopherson25009142021-01-22 15:50:47 -08002028 ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
2029 ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
2030 ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
2031 ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
Tom Lendacky291bd202020-12-10 11:09:47 -06002032}
2033
2034static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
2035{
2036 struct vmcb_control_area *control = &svm->vmcb->control;
2037 struct kvm_vcpu *vcpu = &svm->vcpu;
2038 struct ghcb *ghcb = svm->ghcb;
2039 u64 exit_code;
2040
2041 /*
2042 * The GHCB protocol so far allows for the following data
2043 * to be supplied:
2044 * GPRs RAX, RBX, RCX, RDX
2045 * XCR0
2046 * CPL
2047 *
2048 * VMMCALL allows the guest to provide extra registers. KVM also
2049 * expects RSI for hypercalls, so include that, too.
2050 *
2051 * Copy their values to the appropriate location if supplied.
2052 */
2053 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
2054
2055 vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
2056 vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
2057 vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
2058 vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
2059 vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
2060
2061 svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
2062
2063 if (ghcb_xcr0_is_valid(ghcb)) {
2064 vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
2065 kvm_update_cpuid_runtime(vcpu);
2066 }
2067
2068 /* Copy the GHCB exit information into the VMCB fields */
2069 exit_code = ghcb_get_sw_exit_code(ghcb);
2070 control->exit_code = lower_32_bits(exit_code);
2071 control->exit_code_hi = upper_32_bits(exit_code);
2072 control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
2073 control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
2074
2075 /* Clear the valid entries fields */
2076 memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
2077}
2078
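/*
 * Sanity check a VMGEXIT request: the GHCB usage code must be 0 and
 * every GHCB field consumed by the requested exit code must be marked
 * valid in the GHCB bitmap. Failures are reported to userspace as
 * KVM_EXIT_INTERNAL_ERROR.
 */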
2079static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
2080{
2081 struct kvm_vcpu *vcpu;
2082 struct ghcb *ghcb;
2083 u64 exit_code = 0;
2084
2085 ghcb = svm->ghcb;
2086
2087 /* Only GHCB Usage code 0 is supported */
2088 if (ghcb->ghcb_usage)
2089 goto vmgexit_err;
2090
2091 /*
2092	 * Retrieve the exit code now even though it may not be marked valid,
2093	 * as it could help with debugging.
2094 */
2095 exit_code = ghcb_get_sw_exit_code(ghcb);
2096
2097 if (!ghcb_sw_exit_code_is_valid(ghcb) ||
2098 !ghcb_sw_exit_info_1_is_valid(ghcb) ||
2099 !ghcb_sw_exit_info_2_is_valid(ghcb))
2100 goto vmgexit_err;
2101
2102 switch (ghcb_get_sw_exit_code(ghcb)) {
2103 case SVM_EXIT_READ_DR7:
2104 break;
2105 case SVM_EXIT_WRITE_DR7:
2106 if (!ghcb_rax_is_valid(ghcb))
2107 goto vmgexit_err;
2108 break;
2109 case SVM_EXIT_RDTSC:
2110 break;
2111 case SVM_EXIT_RDPMC:
2112 if (!ghcb_rcx_is_valid(ghcb))
2113 goto vmgexit_err;
2114 break;
2115 case SVM_EXIT_CPUID:
2116 if (!ghcb_rax_is_valid(ghcb) ||
2117 !ghcb_rcx_is_valid(ghcb))
2118 goto vmgexit_err;
2119 if (ghcb_get_rax(ghcb) == 0xd)
2120 if (!ghcb_xcr0_is_valid(ghcb))
2121 goto vmgexit_err;
2122 break;
2123 case SVM_EXIT_INVD:
2124 break;
2125 case SVM_EXIT_IOIO:
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002126 if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
2127 if (!ghcb_sw_scratch_is_valid(ghcb))
Tom Lendacky291bd202020-12-10 11:09:47 -06002128 goto vmgexit_err;
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002129 } else {
2130 if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
2131 if (!ghcb_rax_is_valid(ghcb))
2132 goto vmgexit_err;
2133 }
Tom Lendacky291bd202020-12-10 11:09:47 -06002134 break;
2135 case SVM_EXIT_MSR:
2136 if (!ghcb_rcx_is_valid(ghcb))
2137 goto vmgexit_err;
2138 if (ghcb_get_sw_exit_info_1(ghcb)) {
2139 if (!ghcb_rax_is_valid(ghcb) ||
2140 !ghcb_rdx_is_valid(ghcb))
2141 goto vmgexit_err;
2142 }
2143 break;
2144 case SVM_EXIT_VMMCALL:
2145 if (!ghcb_rax_is_valid(ghcb) ||
2146 !ghcb_cpl_is_valid(ghcb))
2147 goto vmgexit_err;
2148 break;
2149 case SVM_EXIT_RDTSCP:
2150 break;
2151 case SVM_EXIT_WBINVD:
2152 break;
2153 case SVM_EXIT_MONITOR:
2154 if (!ghcb_rax_is_valid(ghcb) ||
2155 !ghcb_rcx_is_valid(ghcb) ||
2156 !ghcb_rdx_is_valid(ghcb))
2157 goto vmgexit_err;
2158 break;
2159 case SVM_EXIT_MWAIT:
2160 if (!ghcb_rax_is_valid(ghcb) ||
2161 !ghcb_rcx_is_valid(ghcb))
2162 goto vmgexit_err;
2163 break;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002164 case SVM_VMGEXIT_MMIO_READ:
2165 case SVM_VMGEXIT_MMIO_WRITE:
2166 if (!ghcb_sw_scratch_is_valid(ghcb))
2167 goto vmgexit_err;
2168 break;
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002169 case SVM_VMGEXIT_NMI_COMPLETE:
Tom Lendacky647daca2021-01-04 14:20:01 -06002170 case SVM_VMGEXIT_AP_HLT_LOOP:
Tom Lendacky8640ca52020-12-15 12:44:07 -05002171 case SVM_VMGEXIT_AP_JUMP_TABLE:
Tom Lendacky291bd202020-12-10 11:09:47 -06002172 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
2173 break;
2174 default:
2175 goto vmgexit_err;
2176 }
2177
2178 return 0;
2179
2180vmgexit_err:
2181 vcpu = &svm->vcpu;
2182
2183 if (ghcb->ghcb_usage) {
2184 vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
2185 ghcb->ghcb_usage);
2186 } else {
2187 vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
2188 exit_code);
2189 dump_ghcb(svm);
2190 }
2191
2192 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2193 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
2194 vcpu->run->internal.ndata = 2;
2195 vcpu->run->internal.data[0] = exit_code;
2196 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
2197
2198 return -EINVAL;
2199}
2200
2201static void pre_sev_es_run(struct vcpu_svm *svm)
2202{
2203 if (!svm->ghcb)
2204 return;
2205
Tom Lendacky8f423a82020-12-10 11:09:53 -06002206 if (svm->ghcb_sa_free) {
2207 /*
2208 * The scratch area lives outside the GHCB, so there is a
2209 * buffer that, depending on the operation performed, may
2210 * need to be synced, then freed.
2211 */
2212 if (svm->ghcb_sa_sync) {
2213 kvm_write_guest(svm->vcpu.kvm,
2214 ghcb_get_sw_scratch(svm->ghcb),
2215 svm->ghcb_sa, svm->ghcb_sa_len);
2216 svm->ghcb_sa_sync = false;
2217 }
2218
2219 kfree(svm->ghcb_sa);
2220 svm->ghcb_sa = NULL;
2221 svm->ghcb_sa_free = false;
2222 }
2223
Tom Lendackyd523ab6b2020-12-10 11:09:48 -06002224 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);
2225
Tom Lendacky291bd202020-12-10 11:09:47 -06002226 sev_es_sync_to_ghcb(svm);
2227
2228 kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
2229 svm->ghcb = NULL;
2230}
2231
Joerg Roedeleaf78262020-03-24 10:41:54 +01002232void pre_sev_run(struct vcpu_svm *svm, int cpu)
2233{
2234 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2235 int asid = sev_get_asid(svm->vcpu.kvm);
2236
Tom Lendacky291bd202020-12-10 11:09:47 -06002237 /* Perform any SEV-ES pre-run actions */
2238 pre_sev_es_run(svm);
2239
Joerg Roedeleaf78262020-03-24 10:41:54 +01002240 /* Assign the asid allocated with this SEV guest */
Paolo Bonzinidee734a2020-11-30 09:39:59 -05002241 svm->asid = asid;
Joerg Roedeleaf78262020-03-24 10:41:54 +01002242
2243 /*
2244 * Flush guest TLB:
2245 *
2246 * 1) when different VMCB for the same ASID is to be run on the same host CPU.
2247 * 2) or this VMCB was executed on different host CPU in previous VMRUNs.
2248 */
2249 if (sd->sev_vmcbs[asid] == svm->vmcb &&
Jim Mattson8a14fe42020-06-03 16:56:22 -07002250 svm->vcpu.arch.last_vmentry_cpu == cpu)
Joerg Roedeleaf78262020-03-24 10:41:54 +01002251 return;
2252
Joerg Roedeleaf78262020-03-24 10:41:54 +01002253 sd->sev_vmcbs[asid] = svm->vmcb;
2254 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
Joerg Roedel06e78522020-06-25 10:03:23 +02002255 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
Joerg Roedeleaf78262020-03-24 10:41:54 +01002256}
Tom Lendacky291bd202020-12-10 11:09:47 -06002257
Tom Lendacky8f423a82020-12-10 11:09:53 -06002258#define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
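/*
 * Map the scratch area named by the GHCB sw_scratch field: either a
 * slice of the GHCB shared buffer (used in place) or a separate guest
 * buffer that is bounced through kernel memory and, if @sync is set,
 * written back to the guest before the next VMRUN.
 */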
2259static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
2260{
2261 struct vmcb_control_area *control = &svm->vmcb->control;
2262 struct ghcb *ghcb = svm->ghcb;
2263 u64 ghcb_scratch_beg, ghcb_scratch_end;
2264 u64 scratch_gpa_beg, scratch_gpa_end;
2265 void *scratch_va;
2266
2267 scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
2268 if (!scratch_gpa_beg) {
2269 pr_err("vmgexit: scratch gpa not provided\n");
2270 return false;
2271 }
2272
2273 scratch_gpa_end = scratch_gpa_beg + len;
2274 if (scratch_gpa_end < scratch_gpa_beg) {
2275 pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
2276 len, scratch_gpa_beg);
2277 return false;
2278 }
2279
2280 if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
2281 /* Scratch area begins within GHCB */
2282 ghcb_scratch_beg = control->ghcb_gpa +
2283 offsetof(struct ghcb, shared_buffer);
2284 ghcb_scratch_end = control->ghcb_gpa +
2285 offsetof(struct ghcb, reserved_1);
2286
2287 /*
2288 * If the scratch area begins within the GHCB, it must be
2289 * completely contained in the GHCB shared buffer area.
2290 */
2291 if (scratch_gpa_beg < ghcb_scratch_beg ||
2292 scratch_gpa_end > ghcb_scratch_end) {
2293 pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
2294 scratch_gpa_beg, scratch_gpa_end);
2295 return false;
2296 }
2297
2298 scratch_va = (void *)svm->ghcb;
2299 scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
2300 } else {
2301 /*
2302 * The guest memory must be read into a kernel buffer, so
2303 * limit the size
2304 */
2305 if (len > GHCB_SCRATCH_AREA_LIMIT) {
2306 pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
2307 len, GHCB_SCRATCH_AREA_LIMIT);
2308 return false;
2309 }
Sean Christophersoneba04b22021-03-30 19:30:25 -07002310 scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
Tom Lendacky8f423a82020-12-10 11:09:53 -06002311 if (!scratch_va)
2312 return false;
2313
2314 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
2315 /* Unable to copy scratch area from guest */
2316 pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
2317
2318 kfree(scratch_va);
2319 return false;
2320 }
2321
2322 /*
2323 * The scratch area is outside the GHCB. The operation will
2324 * dictate whether the buffer needs to be synced before running
2325 * the vCPU next time (i.e. a read was requested so the data
2326 * must be written back to the guest memory).
2327 */
2328 svm->ghcb_sa_sync = sync;
2329 svm->ghcb_sa_free = true;
2330 }
2331
2332 svm->ghcb_sa = scratch_va;
2333 svm->ghcb_sa_len = len;
2334
2335 return true;
2336}
2337
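/*
 * The GHCB MSR protocol multiplexes small requests through the GHCB GPA
 * field of the VMCB control area. These helpers update or extract a bit
 * field of width @mask at bit offset @pos within that value.
 */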
Tom Lendackyd3694662020-12-10 11:09:50 -06002338static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
2339 unsigned int pos)
2340{
2341 svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
2342 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
2343}
2344
2345static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
2346{
2347 return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
2348}
2349
Tom Lendacky1edc1452020-12-10 11:09:49 -06002350static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
2351{
2352 svm->vmcb->control.ghcb_gpa = value;
2353}
2354
Tom Lendacky291bd202020-12-10 11:09:47 -06002355static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
2356{
Tom Lendacky1edc1452020-12-10 11:09:49 -06002357 struct vmcb_control_area *control = &svm->vmcb->control;
Tom Lendackyd3694662020-12-10 11:09:50 -06002358 struct kvm_vcpu *vcpu = &svm->vcpu;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002359 u64 ghcb_info;
Tom Lendackyd3694662020-12-10 11:09:50 -06002360 int ret = 1;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002361
2362 ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;
2363
Tom Lendacky59e38b52020-12-10 11:09:52 -06002364 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
2365 control->ghcb_gpa);
2366
Tom Lendacky1edc1452020-12-10 11:09:49 -06002367 switch (ghcb_info) {
2368 case GHCB_MSR_SEV_INFO_REQ:
2369 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2370 GHCB_VERSION_MIN,
2371 sev_enc_bit));
2372 break;
Tom Lendackyd3694662020-12-10 11:09:50 -06002373 case GHCB_MSR_CPUID_REQ: {
2374 u64 cpuid_fn, cpuid_reg, cpuid_value;
2375
2376 cpuid_fn = get_ghcb_msr_bits(svm,
2377 GHCB_MSR_CPUID_FUNC_MASK,
2378 GHCB_MSR_CPUID_FUNC_POS);
2379
2380 /* Initialize the registers needed by the CPUID intercept */
2381 vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
2382 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2383
Paolo Bonzini63129752021-03-02 14:40:39 -05002384 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
Tom Lendackyd3694662020-12-10 11:09:50 -06002385 if (!ret) {
2386 ret = -EINVAL;
2387 break;
2388 }
2389
2390 cpuid_reg = get_ghcb_msr_bits(svm,
2391 GHCB_MSR_CPUID_REG_MASK,
2392 GHCB_MSR_CPUID_REG_POS);
2393 if (cpuid_reg == 0)
2394 cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
2395 else if (cpuid_reg == 1)
2396 cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
2397 else if (cpuid_reg == 2)
2398 cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
2399 else
2400 cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];
2401
2402 set_ghcb_msr_bits(svm, cpuid_value,
2403 GHCB_MSR_CPUID_VALUE_MASK,
2404 GHCB_MSR_CPUID_VALUE_POS);
2405
2406 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
2407 GHCB_MSR_INFO_MASK,
2408 GHCB_MSR_INFO_POS);
2409 break;
2410 }
Tom Lendackye1d71112020-12-10 11:09:51 -06002411 case GHCB_MSR_TERM_REQ: {
2412 u64 reason_set, reason_code;
2413
2414 reason_set = get_ghcb_msr_bits(svm,
2415 GHCB_MSR_TERM_REASON_SET_MASK,
2416 GHCB_MSR_TERM_REASON_SET_POS);
2417 reason_code = get_ghcb_msr_bits(svm,
2418 GHCB_MSR_TERM_REASON_MASK,
2419 GHCB_MSR_TERM_REASON_POS);
2420 pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
2421 reason_set, reason_code);
2422 fallthrough;
2423 }
Tom Lendacky1edc1452020-12-10 11:09:49 -06002424 default:
Tom Lendackyd3694662020-12-10 11:09:50 -06002425 ret = -EINVAL;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002426 }
2427
Tom Lendacky59e38b52020-12-10 11:09:52 -06002428 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
2429 control->ghcb_gpa, ret);
2430
Tom Lendackyd3694662020-12-10 11:09:50 -06002431 return ret;
Tom Lendacky291bd202020-12-10 11:09:47 -06002432}
2433
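/*
 * VMGEXIT is an SEV-ES guest's request for hypervisor service: either
 * via the GHCB MSR protocol (info bits set in the GHCB GPA) or via a
 * full GHCB page, which is mapped, validated, synced into the VMCB and
 * dispatched to the matching exit handler.
 */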
Paolo Bonzini63129752021-03-02 14:40:39 -05002434int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
Tom Lendacky291bd202020-12-10 11:09:47 -06002435{
Paolo Bonzini63129752021-03-02 14:40:39 -05002436 struct vcpu_svm *svm = to_svm(vcpu);
Tom Lendacky291bd202020-12-10 11:09:47 -06002437 struct vmcb_control_area *control = &svm->vmcb->control;
2438 u64 ghcb_gpa, exit_code;
2439 struct ghcb *ghcb;
2440 int ret;
2441
2442 /* Validate the GHCB */
2443 ghcb_gpa = control->ghcb_gpa;
2444 if (ghcb_gpa & GHCB_MSR_INFO_MASK)
2445 return sev_handle_vmgexit_msr_protocol(svm);
2446
2447 if (!ghcb_gpa) {
Paolo Bonzini63129752021-03-02 14:40:39 -05002448 vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
Tom Lendacky291bd202020-12-10 11:09:47 -06002449 return -EINVAL;
2450 }
2451
Paolo Bonzini63129752021-03-02 14:40:39 -05002452 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
Tom Lendacky291bd202020-12-10 11:09:47 -06002453 /* Unable to map GHCB from guest */
Paolo Bonzini63129752021-03-02 14:40:39 -05002454 vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
Tom Lendacky291bd202020-12-10 11:09:47 -06002455 ghcb_gpa);
2456 return -EINVAL;
2457 }
2458
2459 svm->ghcb = svm->ghcb_map.hva;
2460 ghcb = svm->ghcb_map.hva;
2461
Paolo Bonzini63129752021-03-02 14:40:39 -05002462 trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
Tom Lendackyd523ab6b2020-12-10 11:09:48 -06002463
Tom Lendacky291bd202020-12-10 11:09:47 -06002464 exit_code = ghcb_get_sw_exit_code(ghcb);
2465
2466 ret = sev_es_validate_vmgexit(svm);
2467 if (ret)
2468 return ret;
2469
2470 sev_es_sync_from_ghcb(svm);
2471 ghcb_set_sw_exit_info_1(ghcb, 0);
2472 ghcb_set_sw_exit_info_2(ghcb, 0);
2473
2474 ret = -EINVAL;
2475 switch (exit_code) {
Tom Lendacky8f423a82020-12-10 11:09:53 -06002476 case SVM_VMGEXIT_MMIO_READ:
2477 if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
2478 break;
2479
Paolo Bonzini63129752021-03-02 14:40:39 -05002480 ret = kvm_sev_es_mmio_read(vcpu,
Tom Lendacky8f423a82020-12-10 11:09:53 -06002481 control->exit_info_1,
2482 control->exit_info_2,
2483 svm->ghcb_sa);
2484 break;
2485 case SVM_VMGEXIT_MMIO_WRITE:
2486 if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
2487 break;
2488
Paolo Bonzini63129752021-03-02 14:40:39 -05002489 ret = kvm_sev_es_mmio_write(vcpu,
Tom Lendacky8f423a82020-12-10 11:09:53 -06002490 control->exit_info_1,
2491 control->exit_info_2,
2492 svm->ghcb_sa);
2493 break;
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002494 case SVM_VMGEXIT_NMI_COMPLETE:
Paolo Bonzini63129752021-03-02 14:40:39 -05002495 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002496 break;
Tom Lendacky647daca2021-01-04 14:20:01 -06002497 case SVM_VMGEXIT_AP_HLT_LOOP:
Paolo Bonzini63129752021-03-02 14:40:39 -05002498 ret = kvm_emulate_ap_reset_hold(vcpu);
Tom Lendacky647daca2021-01-04 14:20:01 -06002499 break;
Tom Lendacky8640ca52020-12-15 12:44:07 -05002500 case SVM_VMGEXIT_AP_JUMP_TABLE: {
Paolo Bonzini63129752021-03-02 14:40:39 -05002501 struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
Tom Lendacky8640ca52020-12-15 12:44:07 -05002502
2503 switch (control->exit_info_1) {
2504 case 0:
2505 /* Set AP jump table address */
2506 sev->ap_jump_table = control->exit_info_2;
2507 break;
2508 case 1:
2509 /* Get AP jump table address */
2510 ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
2511 break;
2512 default:
2513 pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
2514 control->exit_info_1);
2515 ghcb_set_sw_exit_info_1(ghcb, 1);
2516 ghcb_set_sw_exit_info_2(ghcb,
2517 X86_TRAP_UD |
2518 SVM_EVTINJ_TYPE_EXEPT |
2519 SVM_EVTINJ_VALID);
2520 }
2521
2522 ret = 1;
2523 break;
2524 }
Tom Lendacky291bd202020-12-10 11:09:47 -06002525 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
Paolo Bonzini63129752021-03-02 14:40:39 -05002526 vcpu_unimpl(vcpu,
Tom Lendacky291bd202020-12-10 11:09:47 -06002527 "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
2528 control->exit_info_1, control->exit_info_2);
2529 break;
2530 default:
Paolo Bonzini63129752021-03-02 14:40:39 -05002531 ret = svm_invoke_exit_handler(vcpu, exit_code);
Tom Lendacky291bd202020-12-10 11:09:47 -06002532 }
2533
2534 return ret;
2535}
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002536
2537int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
2538{
2539 if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
2540 return -EINVAL;
2541
2542 return kvm_sev_es_string_io(&svm->vcpu, size, port,
2543 svm->ghcb_sa, svm->ghcb_sa_len, in);
2544}
Tom Lendacky376c6d22020-12-10 11:10:06 -06002545
2546void sev_es_init_vmcb(struct vcpu_svm *svm)
2547{
2548 struct kvm_vcpu *vcpu = &svm->vcpu;
2549
2550 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
2551 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
2552
2553 /*
2554	 * An SEV-ES guest requires a VMSA area that is separate from the
2555 * VMCB page. Do not include the encryption mask on the VMSA physical
2556 * address since hardware will access it using the guest key.
2557 */
2558 svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);
2559
2560 /* Can't intercept CR register access, HV can't modify CR registers */
2561 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
2562 svm_clr_intercept(svm, INTERCEPT_CR4_READ);
2563 svm_clr_intercept(svm, INTERCEPT_CR8_READ);
2564 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
2565 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
2566 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
2567
2568 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);
2569
2570 /* Track EFER/CR register changes */
2571 svm_set_intercept(svm, TRAP_EFER_WRITE);
2572 svm_set_intercept(svm, TRAP_CR0_WRITE);
2573 svm_set_intercept(svm, TRAP_CR4_WRITE);
2574 svm_set_intercept(svm, TRAP_CR8_WRITE);
2575
2576 /* No support for enable_vmware_backdoor */
2577 clr_exception_intercept(svm, GP_VECTOR);
2578
2579 /* Can't intercept XSETBV, HV can't modify XCR0 directly */
2580 svm_clr_intercept(svm, INTERCEPT_XSETBV);
2581
2582 /* Clear intercepts on selected MSRs */
2583 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
2584 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
2585 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
2586 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
2587 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
2588 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
2589}
2590
2591void sev_es_create_vcpu(struct vcpu_svm *svm)
2592{
2593 /*
2594 * Set the GHCB MSR value as per the GHCB specification when creating
2595 * a vCPU for an SEV-ES guest.
2596 */
2597 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2598 GHCB_VERSION_MIN,
2599 sev_enc_bit));
2600}
Tom Lendacky86137772020-12-10 11:10:07 -06002601
Michael Rotha7fc06d2021-02-02 13:01:26 -06002602void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
Tom Lendacky86137772020-12-10 11:10:07 -06002603{
2604 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2605 struct vmcb_save_area *hostsa;
Tom Lendacky86137772020-12-10 11:10:07 -06002606
2607 /*
2608	 * For an SEV-ES guest, hardware will restore the host state on VMEXIT,
2609	 * one step of which is to perform a VMLOAD. Since hardware does not
2610	 * perform a VMSAVE on VMRUN, the host save area must be updated.
2611 */
Sean Christopherson35a78312020-12-30 16:27:00 -08002612 vmsave(__sme_page_pa(sd->save_area));
Tom Lendacky86137772020-12-10 11:10:07 -06002613
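	/*
	 * Per the APM, a VMCB's save area begins at offset 0x400 within the
	 * VMCB page; the VMSAVE above stored the host state there, which is
	 * why the fields are accessed at that fixed offset.
	 */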
Tom Lendacky86137772020-12-10 11:10:07 -06002614 /* XCR0 is restored on VMEXIT, save the current host value */
2615 hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
2616 hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
2617
2618	/* PKRU is restored on VMEXIT, save the current host value */
2619 hostsa->pkru = read_pkru();
2620
2621	/* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
2622 hostsa->xss = host_xss;
2623}
2624
Tom Lendacky647daca2021-01-04 14:20:01 -06002625void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
2626{
2627 struct vcpu_svm *svm = to_svm(vcpu);
2628
2629 /* First SIPI: Use the values as initially set by the VMM */
2630 if (!svm->received_first_sipi) {
2631 svm->received_first_sipi = true;
2632 return;
2633 }
2634
2635 /*
2636 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
2637 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
2638 * non-zero value.
2639 */
Tom Lendackya3ba26e2021-04-09 09:38:42 -05002640 if (!svm->ghcb)
2641 return;
2642
Tom Lendacky647daca2021-01-04 14:20:01 -06002643 ghcb_set_sw_exit_info_2(svm->ghcb, 1);
2644}