// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/processor.h>
#include <linux/trace_events.h>
#include <asm/fpu/internal.h>

#include <asm/trapnr.h>

#include "x86.h"
#include "svm.h"
#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

static u8 sev_enc_bit;
static int sev_flush_asids(void);
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long sev_me_mask;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;

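/*
 * Bookkeeping for a userspace range registered for encryption: the pinned
 * pages backing it and the original user address/size used to look it up
 * again when the region is unregistered.
 */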
struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

static int sev_flush_asids(void)
{
	int ret, error = 0;

	/*
	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
	 * so it must be guarded.
	 */
	down_write(&sev_deactivate_lock);

	wbinvd_on_all_cpus();
	ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return ret;
}

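/* A VM is a "mirror" if it shares another VM's encryption context. */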
static inline bool is_mirroring_enc_context(struct kvm *kvm)
{
	return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(int min_asid, int max_asid)
{
	int pos;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	pos = find_next_bit(sev_reclaim_asid_bitmap, max_sev_asid, min_asid);
	if (pos >= max_asid)
		return false;

	if (sev_flush_asids())
		return false;

	/* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
		   max_sev_asid);
	bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);

	return true;
}

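/*
 * Allocate a free ASID from the range appropriate for the guest type:
 * SEV-ES guests draw from [1, min_sev_asid - 1], plain SEV guests from
 * [min_sev_asid, max_sev_asid].  If none is free, try recycling
 * reclaimable ASIDs once before failing with -EBUSY.
 */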
static int sev_asid_new(bool es_active)
{
	int pos, min_asid, max_asid;
	bool retry = true;

	mutex_lock(&sev_bitmap_lock);

	/*
	 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
	 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
	 */
	min_asid = es_active ? 0 : min_sev_asid - 1;
	max_asid = es_active ? min_sev_asid - 1 : max_sev_asid;
again:
	pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
	if (pos >= max_asid) {
		if (retry && __sev_recycle_asids(min_asid, max_asid)) {
			retry = false;
			goto again;
		}
		mutex_unlock(&sev_bitmap_lock);
		return -EBUSY;
	}

	__set_bit(pos, sev_asid_bitmap);

	mutex_unlock(&sev_bitmap_lock);

	return pos + 1;
}

static int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->asid;
}

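/*
 * Mark an ASID as reclaimable rather than immediately reusable; it only
 * becomes available again after a DF_FLUSH in __sev_recycle_asids().
 * Also drop any per-CPU VMCB tracking for that ASID.
 */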
static void sev_asid_free(int asid)
{
	struct svm_cpu_data *sd;
	int cpu, pos;

	mutex_lock(&sev_bitmap_lock);

	pos = asid - 1;
	__set_bit(pos, sev_reclaim_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu(svm_data, cpu);
		sd->sev_vmcbs[pos] = NULL;
	}

	mutex_unlock(&sev_bitmap_lock);
}

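/*
 * Tear down the firmware context for a guest handle: DEACTIVATE detaches
 * the ASID from the handle, then DECOMMISSION releases the firmware
 * resources associated with it.
 */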
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_decommission *decommission;
	struct sev_data_deactivate *data;

	if (!handle)
		return;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	/* deactivate handle */
	data->handle = handle;

	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(data, NULL);
	up_read(&sev_deactivate_lock);

	kfree(data);

	decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
	if (!decommission)
		return;

	/* decommission handle */
	decommission->handle = handle;
	sev_guest_decommission(decommission, NULL);

	kfree(decommission);
}

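/*
 * KVM_SEV_INIT / KVM_SEV_ES_INIT: allocate an ASID, initialize the SEV
 * platform and mark the VM as an SEV (or SEV-ES) guest.  Must be done
 * before any vCPUs are created.
 */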
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	bool es_active = argp->id == KVM_SEV_ES_INIT;
	int asid, ret;

	if (kvm->created_vcpus)
		return -EINVAL;

	ret = -EBUSY;
	if (unlikely(sev->active))
		return ret;

	asid = sev_asid_new(es_active);
	if (asid < 0)
		return ret;

	ret = sev_platform_init(&argp->error);
	if (ret)
		goto e_free;

	sev->active = true;
	sev->es_active = es_active;
	sev->asid = asid;
	INIT_LIST_HEAD(&sev->regions_list);

	return 0;

e_free:
	sev_asid_free(asid);
	return ret;
}

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
	struct sev_data_activate *data;
	int asid = sev_get_asid(kvm);
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	/* activate ASID on the given handle */
	data->handle = handle;
	data->asid = asid;
	ret = sev_guest_activate(data, error);
	kfree(data);

	return ret;
}

static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = sev_issue_cmd_external_user(f.file, id, data, error);

	fdput(f);
	return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return __sev_issue_cmd(sev->fd, id, data, error);
}

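/*
 * KVM_SEV_LAUNCH_START: create the memory encryption context in firmware
 * from the optional DH certificate and session blobs, bind the VM's ASID
 * to the returned handle, and hand the handle back to userspace.
 */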
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_start *start;
	struct kvm_sev_launch_start params;
	void *dh_blob, *session_blob;
	int *error = &argp->error;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
	if (!start)
		return -ENOMEM;

	dh_blob = NULL;
	if (params.dh_uaddr) {
		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
		if (IS_ERR(dh_blob)) {
			ret = PTR_ERR(dh_blob);
			goto e_free;
		}

		start->dh_cert_address = __sme_set(__pa(dh_blob));
		start->dh_cert_len = params.dh_len;
	}

	session_blob = NULL;
	if (params.session_uaddr) {
		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
		if (IS_ERR(session_blob)) {
			ret = PTR_ERR(session_blob);
			goto e_free_dh;
		}

		start->session_address = __sme_set(__pa(session_blob));
		start->session_len = params.session_len;
	}

	start->handle = params.handle;
	start->policy = params.policy;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start->handle, error);
	if (ret)
		goto e_free_session;

	/* return handle to userspace */
	params.handle = start->handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
		sev_unbind_asid(kvm, start->handle);
		ret = -EFAULT;
		goto e_free_session;
	}

	sev->handle = start->handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_blob);
e_free_dh:
	kfree(dh_blob);
e_free:
	kfree(start);
	return ret;
}

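/*
 * Pin a userspace range and return the backing pages.  The pinned page
 * count is charged against RLIMIT_MEMLOCK via sev->pages_locked; callers
 * must hold kvm->lock and release the pages with sev_unpin_memory().
 */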
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	unsigned long npages, size;
	int npinned;
	unsigned long locked, lock_limit;
	struct page **pages;
	unsigned long first, last;
	int ret;

	lockdep_assert_held(&kvm->lock);

	if (ulen == 0 || uaddr + ulen < uaddr)
		return ERR_PTR(-EINVAL);

	/* Calculate number of pages. */
	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return ERR_PTR(-ENOMEM);
	}

	if (WARN_ON_ONCE(npages > INT_MAX))
		return ERR_PTR(-EINVAL);

	/* Avoid using vmalloc for smaller buffers. */
	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	else
		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Pin the user virtual address. */
	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);
		ret = -ENOMEM;
		goto err;
	}

	*n = npages;
	sev->pages_locked = locked;

	return pages;

err:
	if (npinned > 0)
		unpin_user_pages(pages, npinned);

	kvfree(pages);
	return ERR_PTR(ret);
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	unpin_user_pages(pages, npages);
	kvfree(pages);
	sev->pages_locked -= npages;
}

static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
	uint8_t *page_virtual;
	unsigned long i;

	if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
	    pages == NULL)
		return;

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_atomic(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
}

static unsigned long get_num_contig_pages(unsigned long idx,
					  struct page **inpages, unsigned long npages)
{
	unsigned long paddr, next_paddr;
	unsigned long i = idx + 1, pages = 1;

	/* find the number of contiguous pages starting from idx */
	paddr = __sme_page_pa(inpages[idx]);
	while (i < npages) {
		next_paddr = __sme_page_pa(inpages[i++]);
		if ((paddr + PAGE_SIZE) == next_paddr) {
			pages++;
			paddr = next_paddr;
			continue;
		}
		break;
	}

	return pages;
}

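/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: pin the userspace range, flush it from the
 * caches, then feed it to the firmware in runs of physically contiguous
 * pages so LAUNCH_UPDATE_DATA can encrypt and measure it in place.
 */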
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_launch_update_data params;
	struct sev_data_launch_update_data *data;
	struct page **inpages;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	vaddr = params.uaddr;
	size = params.len;
	vaddr_end = vaddr + size;

	/* Lock the user memory. */
	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
	if (IS_ERR(inpages)) {
		ret = PTR_ERR(inpages);
		goto e_free;
	}

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(inpages, npages);

	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
		int offset, len;

		/*
		 * If the user buffer is not page-aligned, calculate the offset
		 * within the page.
		 */
		offset = vaddr & (PAGE_SIZE - 1);

		/* Calculate the number of pages that can be encrypted in one go. */
		pages = get_num_contig_pages(i, inpages, npages);

		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

		data->handle = sev->handle;
		data->len = len;
		data->address = __sme_page_pa(inpages[i]) + offset;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
		if (ret)
			goto e_unpin;

		size -= len;
		next_vaddr = vaddr + len;
	}

e_unpin:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(inpages[i]);
		mark_page_accessed(inpages[i]);
	}
	/* unlock the user pages */
	sev_unpin_memory(kvm, inpages, npages);
e_free:
	kfree(data);
	return ret;
}

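/*
 * Copy the vCPU's current register state into the VMSA page that will be
 * handed to LAUNCH_UPDATE_VMSA for in-place encryption.  Reject the
 * request if guest debug is active, since it cannot be honored once the
 * VMSA is encrypted.
 */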
static int sev_es_sync_vmsa(struct vcpu_svm *svm)
{
	struct vmcb_save_area *save = &svm->vmcb->save;

	/* Check some debug related fields before encrypting the VMSA */
	if (svm->vcpu.guest_debug || (save->dr7 & ~DR7_FIXED_1))
		return -EINVAL;

	/* Sync registers */
	save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
	save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
	save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
	save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
	save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
	save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
	save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
#ifdef CONFIG_X86_64
	save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8];
	save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9];
	save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
	save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
	save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
	save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
	save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
	save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
#endif
	save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];

	/* Sync some non-GPR registers before encrypting */
	save->xcr0 = svm->vcpu.arch.xcr0;
	save->pkru = svm->vcpu.arch.pkru;
	save->xss = svm->vcpu.arch.ia32_xss;

	/*
	 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
	 * the traditional VMSA that is part of the VMCB. Copy the
	 * traditional VMSA as it has been built so far (in prep
	 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
	 */
	memcpy(svm->vmsa, save, sizeof(*save));

	return 0;
}

static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_update_vmsa *vmsa;
	struct kvm_vcpu *vcpu;
	int i, ret;

	if (!sev_es_guest(kvm))
		return -ENOTTY;

	vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL);
	if (!vmsa)
		return -ENOMEM;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct vcpu_svm *svm = to_svm(vcpu);

		/* Perform some pre-encryption checks against the VMSA */
		ret = sev_es_sync_vmsa(svm);
		if (ret)
			goto e_free;

		/*
		 * The LAUNCH_UPDATE_VMSA command will perform in-place
		 * encryption of the VMSA memory content (i.e. it will write
		 * the same memory region with the guest's key), so invalidate
		 * it first.
		 */
		clflush_cache_range(svm->vmsa, PAGE_SIZE);

		vmsa->handle = sev->handle;
		vmsa->address = __sme_pa(svm->vmsa);
		vmsa->len = PAGE_SIZE;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, vmsa,
				    &argp->error);
		if (ret)
			goto e_free;

		svm->vcpu.arch.guest_state_protected = true;
	}

e_free:
	kfree(vmsa);
	return ret;
}

static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *measure = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_measure *data;
	struct kvm_sev_launch_measure params;
	void __user *p = NULL;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, measure, sizeof(params)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE) {
			ret = -EINVAL;
			goto e_free;
		}

		ret = -ENOMEM;
		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			goto e_free;

		data->address = __psp_pa(blob);
		data->len = params.len;
	}

cmd:
	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);

	/*
	 * If we queried just the length, the FW has filled in the expected
	 * length; copy it back to userspace even if the command returned an
	 * error.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data->len;
	if (copy_to_user(measure, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
e_free:
	kfree(data);
	return ret;
}

static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_finish *data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);

	kfree(data);
	return ret;
}

static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_guest_status params;
	struct sev_data_guest_status *data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
	if (ret)
		goto e_free;

	params.policy = data->policy;
	params.state = data->state;
	params.handle = data->handle;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
		ret = -EFAULT;
e_free:
	kfree(data);
	return ret;
}

static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
			       unsigned long dst, int size,
			       int *error, bool enc)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_dbg *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	data->dst_addr = dst;
	data->src_addr = src;
	data->len = size;

	ret = sev_issue_cmd(kvm,
			    enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
			    data, error);
	kfree(data);
	return ret;
}

static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
			     unsigned long dst_paddr, int sz, int *err)
{
	int offset;

	/*
	 * It's safe to read more than we are asked for; the caller should
	 * ensure that the destination has enough space.
	 */
	offset = src_paddr & 15;
	src_paddr = round_down(src_paddr, 16);
	sz = round_up(sz + offset, 16);

	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}

static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
				  unsigned long __user dst_uaddr,
				  unsigned long dst_paddr,
				  int size, int *err)
{
	struct page *tpage = NULL;
	int ret, offset;

	/* if inputs are not 16-byte aligned then use an intermediate buffer */
	if (!IS_ALIGNED(dst_paddr, 16) ||
	    !IS_ALIGNED(paddr, 16) ||
	    !IS_ALIGNED(size, 16)) {
		tpage = (void *)alloc_page(GFP_KERNEL);
		if (!tpage)
			return -ENOMEM;

		dst_paddr = __sme_page_pa(tpage);
	}

	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
	if (ret)
		goto e_free;

	if (tpage) {
		offset = paddr & 15;
		if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
				 page_address(tpage) + offset, size))
			ret = -EFAULT;
	}

e_free:
	if (tpage)
		__free_page(tpage);

	return ret;
}

static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
				  unsigned long __user vaddr,
				  unsigned long dst_paddr,
				  unsigned long __user dst_vaddr,
				  int size, int *error)
{
	struct page *src_tpage = NULL;
	struct page *dst_tpage = NULL;
	int ret, len = size;

	/* If source buffer is not aligned then use an intermediate buffer */
	if (!IS_ALIGNED(vaddr, 16)) {
		src_tpage = alloc_page(GFP_KERNEL);
		if (!src_tpage)
			return -ENOMEM;

		if (copy_from_user(page_address(src_tpage),
				   (void __user *)(uintptr_t)vaddr, size)) {
			__free_page(src_tpage);
			return -EFAULT;
		}

		paddr = __sme_page_pa(src_tpage);
	}

	/*
	 * If destination buffer or length is not aligned then do read-modify-write:
	 * - decrypt destination in an intermediate buffer
	 * - copy the source buffer in an intermediate buffer
	 * - use the intermediate buffer as source buffer
	 */
	if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
		int dst_offset;

		dst_tpage = alloc_page(GFP_KERNEL);
		if (!dst_tpage) {
			ret = -ENOMEM;
			goto e_free;
		}

		ret = __sev_dbg_decrypt(kvm, dst_paddr,
					__sme_page_pa(dst_tpage), size, error);
		if (ret)
			goto e_free;

		/*
		 * If source is kernel buffer then use memcpy() otherwise
		 * copy_from_user().
		 */
		dst_offset = dst_paddr & 15;

		if (src_tpage)
			memcpy(page_address(dst_tpage) + dst_offset,
			       page_address(src_tpage), size);
		else {
			if (copy_from_user(page_address(dst_tpage) + dst_offset,
					   (void __user *)(uintptr_t)vaddr, size)) {
				ret = -EFAULT;
				goto e_free;
			}
		}

		paddr = __sme_page_pa(dst_tpage);
		dst_paddr = round_down(dst_paddr, 16);
		len = round_up(size, 16);
	}

	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
	if (src_tpage)
		__free_page(src_tpage);
	if (dst_tpage)
		__free_page(dst_tpage);
	return ret;
}

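/*
 * KVM_SEV_DBG_{DECRYPT,ENCRYPT}: walk the source range one page at a
 * time, pinning the source and destination pages and issuing the
 * firmware debug command for each chunk that stays within a page.
 */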
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
	unsigned long vaddr, vaddr_end, next_vaddr;
	unsigned long dst_vaddr;
	struct page **src_p, **dst_p;
	struct kvm_sev_dbg debug;
	unsigned long n;
	unsigned int size;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
		return -EFAULT;

	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
		return -EINVAL;
	if (!debug.dst_uaddr)
		return -EINVAL;

	vaddr = debug.src_uaddr;
	size = debug.len;
	vaddr_end = vaddr + size;
	dst_vaddr = debug.dst_uaddr;

	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
		int len, s_off, d_off;

		/* lock userspace source and destination page */
		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
		if (IS_ERR(src_p))
			return PTR_ERR(src_p);

		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
		if (IS_ERR(dst_p)) {
			sev_unpin_memory(kvm, src_p, n);
			return PTR_ERR(dst_p);
		}

		/*
		 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
		 * the pages; flush the destination too so that future accesses do not
		 * see stale data.
		 */
		sev_clflush_pages(src_p, 1);
		sev_clflush_pages(dst_p, 1);

		/*
		 * Since user buffer may not be page aligned, calculate the
		 * offset within the page.
		 */
		s_off = vaddr & ~PAGE_MASK;
		d_off = dst_vaddr & ~PAGE_MASK;
		len = min_t(size_t, (PAGE_SIZE - s_off), size);

		if (dec)
			ret = __sev_dbg_decrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     dst_vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     len, &argp->error);
		else
			ret = __sev_dbg_encrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     dst_vaddr,
						     len, &argp->error);

		sev_unpin_memory(kvm, src_p, n);
		sev_unpin_memory(kvm, dst_p, n);

		if (ret)
			goto err;

		next_vaddr = vaddr + len;
		dst_vaddr = dst_vaddr + len;
		size -= len;
	}
err:
	return ret;
}

static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_secret *data;
	struct kvm_sev_launch_secret params;
	struct page **pages;
	void *blob, *hdr;
	unsigned long n, i;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(pages, n);

	/*
	 * The secret must be copied into a contiguous memory region, so verify
	 * that the userspace memory pages are contiguous before issuing the
	 * command.
	 */
	if (get_num_contig_pages(0, pages, n) != n) {
		ret = -EINVAL;
		goto e_unpin_memory;
	}

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto e_unpin_memory;

	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	data->guest_address = __sme_page_pa(pages[0]) + offset;
	data->guest_len = params.guest_len;

	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		goto e_free;
	}

	data->trans_address = __psp_pa(blob);
	data->trans_len = params.trans_len;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr)) {
		ret = PTR_ERR(hdr);
		goto e_free_blob;
	}
	data->hdr_address = __psp_pa(hdr);
	data->hdr_len = params.hdr_len;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);

	kfree(hdr);

e_free_blob:
	kfree(blob);
e_free:
	kfree(data);
e_unpin_memory:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < n; i++) {
		set_page_dirty_lock(pages[i]);
		mark_page_accessed(pages[i]);
	}
	sev_unpin_memory(kvm, pages, n);
	return ret;
}

static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *report = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_attestation_report *data;
	struct kvm_sev_attestation_report params;
	void __user *p;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE) {
			ret = -EINVAL;
			goto e_free;
		}

		ret = -ENOMEM;
		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			goto e_free;

		data->address = __psp_pa(blob);
		data->len = params.len;
		memcpy(data->mnonce, params.mnonce, sizeof(params.mnonce));
	}
cmd:
	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, data, &argp->error);
	/*
	 * If we queried just the length, the FW has filled in the expected
	 * length; copy it back to userspace even if the command returned an
	 * error.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data->len;
	if (copy_to_user(report, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
e_free:
	kfree(data);
	return ret;
}

/* Userspace wants to query session length. */
static int
__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
				      struct kvm_sev_send_start *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (data == NULL)
		return -ENOMEM;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, data, &argp->error);
	if (ret < 0)
		goto out;

	params->session_len = data->session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

out:
	kfree(data);
	return ret;
}

static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start *data;
	struct kvm_sev_send_start params;
	void *amd_certs, *session_data;
	void *pdh_cert, *plat_certs;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_start)))
		return -EFAULT;

	/* if session_len is zero, userspace wants to query the session length */
	if (!params.session_len)
		return __sev_send_start_query_session_length(kvm, argp,
				&params);

	/* some sanity checks */
	if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
	    !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
		return -EINVAL;

	/* allocate the memory to hold the session data blob */
	session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
	if (!session_data)
		return -ENOMEM;

	/* copy the certificate blobs from userspace */
	pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
				      params.pdh_cert_len);
	if (IS_ERR(pdh_cert)) {
		ret = PTR_ERR(pdh_cert);
		goto e_free_session;
	}

	plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
					params.plat_certs_len);
	if (IS_ERR(plat_certs)) {
		ret = PTR_ERR(plat_certs);
		goto e_free_pdh;
	}

	amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
				       params.amd_certs_len);
	if (IS_ERR(amd_certs)) {
		ret = PTR_ERR(amd_certs);
		goto e_free_plat_cert;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (data == NULL) {
		ret = -ENOMEM;
		goto e_free_amd_cert;
	}

	/* populate the FW SEND_START field with system physical address */
	data->pdh_cert_address = __psp_pa(pdh_cert);
	data->pdh_cert_len = params.pdh_cert_len;
	data->plat_certs_address = __psp_pa(plat_certs);
	data->plat_certs_len = params.plat_certs_len;
	data->amd_certs_address = __psp_pa(amd_certs);
	data->amd_certs_len = params.amd_certs_len;
	data->session_address = __psp_pa(session_data);
	data->session_len = params.session_len;
	data->handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, data, &argp->error);

	if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
			session_data, params.session_len)) {
		ret = -EFAULT;
		goto e_free;
	}

	params.policy = data->policy;
	params.session_len = data->session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
			sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

e_free:
	kfree(data);
e_free_amd_cert:
	kfree(amd_certs);
e_free_plat_cert:
	kfree(plat_certs);
e_free_pdh:
	kfree(pdh_cert);
e_free_session:
	kfree(session_data);
	return ret;
}

/* Userspace wants to query either header or trans length. */
static int
__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
				     struct kvm_sev_send_update_data *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_update_data *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, data, &argp->error);
	if (ret < 0)
		goto out;

	params->hdr_len = data->hdr_len;
	params->trans_len = data->trans_len;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_update_data)))
		ret = -EFAULT;

out:
	kfree(data);
	return ret;
}

static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_update_data *data;
	struct kvm_sev_send_update_data params;
	void *hdr, *trans_data;
	struct page **guest_page;
	unsigned long n;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			sizeof(struct kvm_sev_send_update_data)))
		return -EFAULT;

	/* userspace wants to query either header or trans length */
	if (!params.trans_len || !params.hdr_len)
		return __sev_send_update_data_query_lengths(kvm, argp, &params);

	if (!params.trans_uaddr || !params.guest_uaddr ||
	    !params.guest_len || !params.hdr_uaddr)
		return -EINVAL;

	/* Check if we are crossing the page boundary */
	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	if ((params.guest_len + offset > PAGE_SIZE))
		return -EINVAL;

	/* Pin guest memory */
	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
				    PAGE_SIZE, &n, 0);
	if (!guest_page)
		return -EFAULT;

	/* allocate memory for header and transport buffer */
	ret = -ENOMEM;
	hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
	if (!hdr)
		goto e_unpin;

	trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
	if (!trans_data)
		goto e_free_hdr;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto e_free_trans_data;

	data->hdr_address = __psp_pa(hdr);
	data->hdr_len = params.hdr_len;
	data->trans_address = __psp_pa(trans_data);
	data->trans_len = params.trans_len;

	/* The SEND_UPDATE_DATA command requires C-bit to be always set. */
	data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) +
				offset;
	data->guest_address |= sev_me_mask;
	data->guest_len = params.guest_len;
	data->handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, data, &argp->error);

	if (ret)
		goto e_free;

	/* copy transport buffer to user space */
	if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
			 trans_data, params.trans_len)) {
		ret = -EFAULT;
		goto e_free;
	}

	/* Copy packet header to userspace. */
	ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
			   params.hdr_len);

e_free:
	kfree(data);
e_free_trans_data:
	kfree(trans_data);
e_free_hdr:
	kfree(hdr);
e_unpin:
	sev_unpin_memory(kvm, guest_page, n);

	return ret;
}

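/*
 * Top-level dispatcher for the KVM_MEMORY_ENCRYPT_OP ioctl: copy in the
 * SEV command, route it to the matching handler under kvm->lock, and
 * copy the (possibly updated) command structure back to userspace.
 */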
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
	struct kvm_sev_cmd sev_cmd;
	int r;

	if (!svm_sev_enabled() || !sev)
		return -ENOTTY;

	if (!argp)
		return 0;

	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
		return -EFAULT;

	mutex_lock(&kvm->lock);

	/* enc_context_owner handles all memory enc operations */
	if (is_mirroring_enc_context(kvm)) {
		r = -EINVAL;
		goto out;
	}

	switch (sev_cmd.id) {
	case KVM_SEV_ES_INIT:
		if (!sev_es) {
			r = -ENOTTY;
			goto out;
		}
		fallthrough;
	case KVM_SEV_INIT:
		r = sev_guest_init(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_START:
		r = sev_launch_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_DATA:
		r = sev_launch_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_VMSA:
		r = sev_launch_update_vmsa(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_MEASURE:
		r = sev_launch_measure(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_FINISH:
		r = sev_launch_finish(kvm, &sev_cmd);
		break;
	case KVM_SEV_GUEST_STATUS:
		r = sev_guest_status(kvm, &sev_cmd);
		break;
	case KVM_SEV_DBG_DECRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, true);
		break;
	case KVM_SEV_DBG_ENCRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, false);
		break;
	case KVM_SEV_LAUNCH_SECRET:
		r = sev_launch_secret(kvm, &sev_cmd);
		break;
	case KVM_SEV_GET_ATTESTATION_REPORT:
		r = sev_get_attestation_report(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_START:
		r = sev_send_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_UPDATE_DATA:
		r = sev_send_update_data(kvm, &sev_cmd);
		break;
	default:
		r = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
		r = -EFAULT;

out:
	mutex_unlock(&kvm->lock);
	return r;
}

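/*
 * KVM_MEMORY_ENCRYPT_REG_REGION: pin the region, track it on the VM's
 * regions_list, and flush the caches so data written with the wrong
 * C-bit is not left stale for later guest accesses.
 */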
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct enc_region *region;
	int ret = 0;

	if (!sev_guest(kvm))
		return -ENOTTY;

	/* If kvm is mirroring encryption context it isn't responsible for it */
	if (is_mirroring_enc_context(kvm))
		return -EINVAL;

	if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
	if (!region)
		return -ENOMEM;

	mutex_lock(&kvm->lock);
	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
	if (IS_ERR(region->pages)) {
		ret = PTR_ERR(region->pages);
		mutex_unlock(&kvm->lock);
		goto e_free;
	}

	region->uaddr = range->addr;
	region->size = range->size;

	list_add_tail(&region->list, &sev->regions_list);
	mutex_unlock(&kvm->lock);

	/*
	 * The guest may change the memory encryption attribute from C=0 -> C=1
	 * or vice versa for this memory range. Let's make sure caches are
	 * flushed to ensure that guest data gets written into memory with
	 * the correct C-bit.
	 */
	sev_clflush_pages(region->pages, region->npages);

	return ret;

e_free:
	kfree(region);
	return ret;
}

static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct enc_region *i;

	list_for_each_entry(i, head, list) {
		if (i->uaddr == range->addr &&
		    i->size == range->size)
			return i;
	}

	return NULL;
}

static void __unregister_enc_region_locked(struct kvm *kvm,
					   struct enc_region *region)
{
	sev_unpin_memory(kvm, region->pages, region->npages);
	list_del(&region->list);
	kfree(region);
}

int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range)
{
	struct enc_region *region;
	int ret;

	/* If kvm is mirroring encryption context it isn't responsible for it */
	if (is_mirroring_enc_context(kvm))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (!sev_guest(kvm)) {
		ret = -ENOTTY;
		goto failed;
	}

	region = find_enc_region(kvm, range);
	if (!region) {
		ret = -EINVAL;
		goto failed;
	}

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	__unregister_enc_region_locked(kvm, region);

	mutex_unlock(&kvm->lock);
	return 0;

failed:
	mutex_unlock(&kvm->lock);
	return ret;
}

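/*
 * Make this VM a "mirror" of an existing SEV guest by sharing its ASID
 * and encryption context.  The mirror takes a reference on the source VM
 * so the ASID stays valid for the mirror's lifetime.
 */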
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
{
	struct file *source_kvm_file;
	struct kvm *source_kvm;
	struct kvm_sev_info *mirror_sev;
	unsigned int asid;
	int ret;

	source_kvm_file = fget(source_fd);
	if (!file_is_kvm(source_kvm_file)) {
		ret = -EBADF;
		goto e_source_put;
	}

	source_kvm = source_kvm_file->private_data;
	mutex_lock(&source_kvm->lock);

	if (!sev_guest(source_kvm)) {
		ret = -EINVAL;
		goto e_source_unlock;
	}

	/* Mirrors of mirrors should work, but let's not get silly */
	if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
		ret = -EINVAL;
		goto e_source_unlock;
	}

	asid = to_kvm_svm(source_kvm)->sev_info.asid;

	/*
	 * The mirror kvm holds an enc_context_owner ref so its asid can't
	 * disappear until we're done with it
	 */
	kvm_get_kvm(source_kvm);

	fput(source_kvm_file);
	mutex_unlock(&source_kvm->lock);
	mutex_lock(&kvm->lock);

	if (sev_guest(kvm)) {
		ret = -EINVAL;
		goto e_mirror_unlock;
	}

	/* Set enc_context_owner and copy its encryption context over */
	mirror_sev = &to_kvm_svm(kvm)->sev_info;
	mirror_sev->enc_context_owner = source_kvm;
	mirror_sev->asid = asid;
	mirror_sev->active = true;

	mutex_unlock(&kvm->lock);
	return 0;

e_mirror_unlock:
	mutex_unlock(&kvm->lock);
	kvm_put_kvm(source_kvm);
	return ret;
e_source_unlock:
	mutex_unlock(&source_kvm->lock);
e_source_put:
	fput(source_kvm_file);
	return ret;
}

void sev_vm_destroy(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct list_head *pos, *q;

	if (!sev_guest(kvm))
		return;

	/* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
	if (is_mirroring_enc_context(kvm)) {
		kvm_put_kvm(sev->enc_context_owner);
		return;
	}

	mutex_lock(&kvm->lock);

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	/*
	 * If userspace was terminated before unregistering the memory regions,
	 * then unpin all the registered memory.
	 */
	if (!list_empty(head)) {
		list_for_each_safe(pos, q, head) {
			__unregister_enc_region_locked(kvm,
				list_entry(pos, struct enc_region, list));
			cond_resched();
		}
	}

	mutex_unlock(&kvm->lock);

	sev_unbind_asid(kvm, sev->handle);
	sev_asid_free(sev->asid);
}

void __init sev_hardware_setup(void)
{
	unsigned int eax, ebx, ecx, edx;
	bool sev_es_supported = false;
	bool sev_supported = false;

	/* Does the CPU support SEV? */
	if (!boot_cpu_has(X86_FEATURE_SEV))
		goto out;

	/* Retrieve SEV CPUID information */
	cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);

	/* Set encryption bit location for SEV-ES guests */
	sev_enc_bit = ebx & 0x3f;

	/* Maximum number of encrypted guests supported simultaneously */
	max_sev_asid = ecx;

	if (!svm_sev_enabled())
		goto out;

	/* Minimum ASID value that should be used for SEV guest */
	min_sev_asid = edx;
	sev_me_mask = 1UL << (ebx & 0x3f);

	/* Initialize SEV ASID bitmaps */
	sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
	if (!sev_asid_bitmap)
		goto out;

	sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
	if (!sev_reclaim_asid_bitmap)
		goto out;

	pr_info("SEV supported: %u ASIDs\n", max_sev_asid - min_sev_asid + 1);
	sev_supported = true;

	/* SEV-ES support requested? */
	if (!sev_es)
		goto out;

	/* Does the CPU support SEV-ES? */
	if (!boot_cpu_has(X86_FEATURE_SEV_ES))
		goto out;

	/* Has the system been allocated ASIDs for SEV-ES? */
	if (min_sev_asid == 1)
		goto out;

	pr_info("SEV-ES supported: %u ASIDs\n", min_sev_asid - 1);
	sev_es_supported = true;

out:
	sev = sev_supported;
	sev_es = sev_es_supported;
}

void sev_hardware_teardown(void)
{
	if (!svm_sev_enabled())
		return;

	bitmap_free(sev_asid_bitmap);
	bitmap_free(sev_reclaim_asid_bitmap);

	sev_flush_asids();
}

/*
 * Pages used by hardware to hold guest encrypted state must be flushed before
 * returning them to the system.
 */
static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
				   unsigned long len)
{
	/*
	 * If hardware enforced cache coherency for encrypted mappings of the
	 * same physical page is supported, nothing to do.
	 */
	if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
		return;

	/*
	 * If the VM Page Flush MSR is supported, use it to flush the page
	 * (using the page virtual address and the guest ASID).
	 */
	if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
		struct kvm_sev_info *sev;
		unsigned long va_start;
		u64 start, stop;

		/* Align start and stop to page boundaries. */
		va_start = (unsigned long)va;
		start = (u64)va_start & PAGE_MASK;
		stop = PAGE_ALIGN((u64)va_start + len);

		if (start < stop) {
			sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;

			while (start < stop) {
				wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
				       start | sev->asid);

				start += PAGE_SIZE;
			}

			return;
		}

		WARN(1, "Address overflow, using WBINVD\n");
	}

	/*
	 * Hardware should always have one of the above features,
	 * but if not, use WBINVD and issue a warning.
	 */
	WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
	wbinvd_on_all_cpus();
}

void sev_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm;

	if (!sev_es_guest(vcpu->kvm))
		return;

	svm = to_svm(vcpu);

	if (vcpu->arch.guest_state_protected)
		sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
	__free_page(virt_to_page(svm->vmsa));

	if (svm->ghcb_sa_free)
		kfree(svm->ghcb_sa);
}

static void dump_ghcb(struct vcpu_svm *svm)
{
	struct ghcb *ghcb = svm->ghcb;
	unsigned int nbits;

	/* Re-use the dump_invalid_vmcb module parameter */
	if (!dump_invalid_vmcb) {
		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
		return;
	}

	nbits = sizeof(ghcb->save.valid_bitmap) * 8;

	pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
	       ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
	       ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
	       ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
	       ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
}

static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct ghcb *ghcb = svm->ghcb;

	/*
	 * The GHCB protocol so far allows for the following data
	 * to be returned:
	 *   GPRs RAX, RBX, RCX, RDX
	 *
	 * Copy their values, even if they may not have been written during the
	 * VM-Exit.  It's the guest's responsibility to not consume random data.
	 */
	ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
	ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
	ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
	ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
}

1844static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
1845{
1846 struct vmcb_control_area *control = &svm->vmcb->control;
1847 struct kvm_vcpu *vcpu = &svm->vcpu;
1848 struct ghcb *ghcb = svm->ghcb;
1849 u64 exit_code;
1850
1851 /*
1852 * The GHCB protocol so far allows for the following data
1853 * to be supplied:
1854 * GPRs RAX, RBX, RCX, RDX
1855 * XCR0
1856 * CPL
1857 *
1858 * VMMCALL allows the guest to provide extra registers. KVM also
1859 * expects RSI for hypercalls, so include that, too.
1860 *
1861 * Copy their values to the appropriate location if supplied.
1862 */
1863 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
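	/*
	 * Note: the *_if_valid accessors below are assumed to return 0 when
	 * the guest did not mark the corresponding field valid in the GHCB,
	 * so any register the guest did not supply stays zero from the
	 * memset above.
	 */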
1864
1865 vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
1866 vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
1867 vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
1868 vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
1869 vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
1870
1871 svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
1872
1873 if (ghcb_xcr0_is_valid(ghcb)) {
1874 vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
1875 kvm_update_cpuid_runtime(vcpu);
1876 }
1877
1878 /* Copy the GHCB exit information into the VMCB fields */
1879 exit_code = ghcb_get_sw_exit_code(ghcb);
1880 control->exit_code = lower_32_bits(exit_code);
1881 control->exit_code_hi = upper_32_bits(exit_code);
1882 control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
1883 control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
1884
1885	/* Clear the valid bitmap so no GHCB fields remain marked valid */
1886 memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
1887}
1888
1889static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
1890{
1891 struct kvm_vcpu *vcpu;
1892 struct ghcb *ghcb;
1893 u64 exit_code = 0;
1894
1895 ghcb = svm->ghcb;
1896
1897 /* Only GHCB Usage code 0 is supported */
1898 if (ghcb->ghcb_usage)
1899 goto vmgexit_err;
1900
1901 /*
1902	 * Retrieve the exit code now even though it may not be marked valid
1903 * as it could help with debugging.
1904 */
1905 exit_code = ghcb_get_sw_exit_code(ghcb);
1906
1907 if (!ghcb_sw_exit_code_is_valid(ghcb) ||
1908 !ghcb_sw_exit_info_1_is_valid(ghcb) ||
1909 !ghcb_sw_exit_info_2_is_valid(ghcb))
1910 goto vmgexit_err;
1911
1912 switch (ghcb_get_sw_exit_code(ghcb)) {
1913 case SVM_EXIT_READ_DR7:
1914 break;
1915 case SVM_EXIT_WRITE_DR7:
1916 if (!ghcb_rax_is_valid(ghcb))
1917 goto vmgexit_err;
1918 break;
1919 case SVM_EXIT_RDTSC:
1920 break;
1921 case SVM_EXIT_RDPMC:
1922 if (!ghcb_rcx_is_valid(ghcb))
1923 goto vmgexit_err;
1924 break;
1925 case SVM_EXIT_CPUID:
1926 if (!ghcb_rax_is_valid(ghcb) ||
1927 !ghcb_rcx_is_valid(ghcb))
1928 goto vmgexit_err;
1929 if (ghcb_get_rax(ghcb) == 0xd)
1930 if (!ghcb_xcr0_is_valid(ghcb))
1931 goto vmgexit_err;
1932 break;
1933 case SVM_EXIT_INVD:
1934 break;
1935 case SVM_EXIT_IOIO:
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06001936 if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
1937 if (!ghcb_sw_scratch_is_valid(ghcb))
Tom Lendacky291bd202020-12-10 11:09:47 -06001938 goto vmgexit_err;
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06001939 } else {
1940 if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
1941 if (!ghcb_rax_is_valid(ghcb))
1942 goto vmgexit_err;
1943 }
Tom Lendacky291bd202020-12-10 11:09:47 -06001944 break;
1945 case SVM_EXIT_MSR:
1946 if (!ghcb_rcx_is_valid(ghcb))
1947 goto vmgexit_err;
1948 if (ghcb_get_sw_exit_info_1(ghcb)) {
1949 if (!ghcb_rax_is_valid(ghcb) ||
1950 !ghcb_rdx_is_valid(ghcb))
1951 goto vmgexit_err;
1952 }
1953 break;
1954 case SVM_EXIT_VMMCALL:
1955 if (!ghcb_rax_is_valid(ghcb) ||
1956 !ghcb_cpl_is_valid(ghcb))
1957 goto vmgexit_err;
1958 break;
1959 case SVM_EXIT_RDTSCP:
1960 break;
1961 case SVM_EXIT_WBINVD:
1962 break;
1963 case SVM_EXIT_MONITOR:
1964 if (!ghcb_rax_is_valid(ghcb) ||
1965 !ghcb_rcx_is_valid(ghcb) ||
1966 !ghcb_rdx_is_valid(ghcb))
1967 goto vmgexit_err;
1968 break;
1969 case SVM_EXIT_MWAIT:
1970 if (!ghcb_rax_is_valid(ghcb) ||
1971 !ghcb_rcx_is_valid(ghcb))
1972 goto vmgexit_err;
1973 break;
Tom Lendacky8f423a82020-12-10 11:09:53 -06001974 case SVM_VMGEXIT_MMIO_READ:
1975 case SVM_VMGEXIT_MMIO_WRITE:
1976 if (!ghcb_sw_scratch_is_valid(ghcb))
1977 goto vmgexit_err;
1978 break;
Tom Lendacky4444dfe2020-12-14 11:16:03 -05001979 case SVM_VMGEXIT_NMI_COMPLETE:
Tom Lendacky647daca2021-01-04 14:20:01 -06001980 case SVM_VMGEXIT_AP_HLT_LOOP:
Tom Lendacky8640ca52020-12-15 12:44:07 -05001981 case SVM_VMGEXIT_AP_JUMP_TABLE:
Tom Lendacky291bd202020-12-10 11:09:47 -06001982 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
1983 break;
1984 default:
1985 goto vmgexit_err;
1986 }
1987
1988 return 0;
1989
1990vmgexit_err:
1991 vcpu = &svm->vcpu;
1992
1993 if (ghcb->ghcb_usage) {
1994 vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
1995 ghcb->ghcb_usage);
1996 } else {
1997 vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
1998 exit_code);
1999 dump_ghcb(svm);
2000 }
2001
2002 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2003 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
2004 vcpu->run->internal.ndata = 2;
2005 vcpu->run->internal.data[0] = exit_code;
2006 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
2007
2008 return -EINVAL;
2009}
2010
2011static void pre_sev_es_run(struct vcpu_svm *svm)
2012{
2013 if (!svm->ghcb)
2014 return;
2015
Tom Lendacky8f423a82020-12-10 11:09:53 -06002016 if (svm->ghcb_sa_free) {
2017 /*
2018 * The scratch area lives outside the GHCB, so there is a
2019 * buffer that, depending on the operation performed, may
2020 * need to be synced, then freed.
2021 */
2022 if (svm->ghcb_sa_sync) {
2023 kvm_write_guest(svm->vcpu.kvm,
2024 ghcb_get_sw_scratch(svm->ghcb),
2025 svm->ghcb_sa, svm->ghcb_sa_len);
2026 svm->ghcb_sa_sync = false;
2027 }
2028
2029 kfree(svm->ghcb_sa);
2030 svm->ghcb_sa = NULL;
2031 svm->ghcb_sa_free = false;
2032 }
2033
Tom Lendackyd523ab6b2020-12-10 11:09:48 -06002034 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);
2035
Tom Lendacky291bd202020-12-10 11:09:47 -06002036 sev_es_sync_to_ghcb(svm);
2037
2038 kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
2039 svm->ghcb = NULL;
2040}
2041
Joerg Roedeleaf78262020-03-24 10:41:54 +01002042void pre_sev_run(struct vcpu_svm *svm, int cpu)
2043{
2044 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2045 int asid = sev_get_asid(svm->vcpu.kvm);
2046
Tom Lendacky291bd202020-12-10 11:09:47 -06002047 /* Perform any SEV-ES pre-run actions */
2048 pre_sev_es_run(svm);
2049
Joerg Roedeleaf78262020-03-24 10:41:54 +01002050	/* Assign the ASID allocated to this SEV guest */
Paolo Bonzinidee734a2020-11-30 09:39:59 -05002051 svm->asid = asid;
Joerg Roedeleaf78262020-03-24 10:41:54 +01002052
2053 /*
2054 * Flush guest TLB:
2055 *
2056	 * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
2057	 * 2) this VMCB was executed on a different host CPU in previous VMRUNs.
2058 */
2059 if (sd->sev_vmcbs[asid] == svm->vmcb &&
Jim Mattson8a14fe42020-06-03 16:56:22 -07002060 svm->vcpu.arch.last_vmentry_cpu == cpu)
Joerg Roedeleaf78262020-03-24 10:41:54 +01002061 return;
2062
Joerg Roedeleaf78262020-03-24 10:41:54 +01002063 sd->sev_vmcbs[asid] = svm->vmcb;
2064 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
Joerg Roedel06e78522020-06-25 10:03:23 +02002065 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
Joerg Roedeleaf78262020-03-24 10:41:54 +01002066}
Tom Lendacky291bd202020-12-10 11:09:47 -06002067
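/*
 * Cap on how much guest scratch data is copied into a kernel buffer when the
 * scratch area lies outside the GHCB; the length is guest-controlled, so it
 * must be bounded before allocating.
 */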
Tom Lendacky8f423a82020-12-10 11:09:53 -06002068#define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
2069static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
2070{
2071 struct vmcb_control_area *control = &svm->vmcb->control;
2072 struct ghcb *ghcb = svm->ghcb;
2073 u64 ghcb_scratch_beg, ghcb_scratch_end;
2074 u64 scratch_gpa_beg, scratch_gpa_end;
2075 void *scratch_va;
2076
2077 scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
2078 if (!scratch_gpa_beg) {
2079 pr_err("vmgexit: scratch gpa not provided\n");
2080 return false;
2081 }
2082
2083 scratch_gpa_end = scratch_gpa_beg + len;
2084 if (scratch_gpa_end < scratch_gpa_beg) {
2085 pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
2086 len, scratch_gpa_beg);
2087 return false;
2088 }
2089
2090 if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
2091 /* Scratch area begins within GHCB */
2092 ghcb_scratch_beg = control->ghcb_gpa +
2093 offsetof(struct ghcb, shared_buffer);
2094 ghcb_scratch_end = control->ghcb_gpa +
2095 offsetof(struct ghcb, reserved_1);
2096
2097 /*
2098 * If the scratch area begins within the GHCB, it must be
2099 * completely contained in the GHCB shared buffer area.
2100 */
2101 if (scratch_gpa_beg < ghcb_scratch_beg ||
2102 scratch_gpa_end > ghcb_scratch_end) {
2103 pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
2104 scratch_gpa_beg, scratch_gpa_end);
2105 return false;
2106 }
2107
2108 scratch_va = (void *)svm->ghcb;
2109 scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
2110 } else {
2111 /*
2112		 * The guest memory must be read into a kernel buffer, so
2113		 * limit the size.
2114 */
2115 if (len > GHCB_SCRATCH_AREA_LIMIT) {
2116 pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
2117 len, GHCB_SCRATCH_AREA_LIMIT);
2118 return false;
2119 }
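		/* GFP_KERNEL_ACCOUNT charges the buffer to the allocating (VM) process's memcg. */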
Sean Christophersoneba04b22021-03-30 19:30:25 -07002120 scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
Tom Lendacky8f423a82020-12-10 11:09:53 -06002121 if (!scratch_va)
2122 return false;
2123
2124 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
2125 /* Unable to copy scratch area from guest */
2126 pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
2127
2128 kfree(scratch_va);
2129 return false;
2130 }
2131
2132 /*
2133 * The scratch area is outside the GHCB. The operation will
2134 * dictate whether the buffer needs to be synced before running
2135 * the vCPU next time (i.e. a read was requested so the data
2136 * must be written back to the guest memory).
2137 */
2138 svm->ghcb_sa_sync = sync;
2139 svm->ghcb_sa_free = true;
2140 }
2141
2142 svm->ghcb_sa = scratch_va;
2143 svm->ghcb_sa_len = len;
2144
2145 return true;
2146}
2147
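/*
 * GHCB MSR protocol helpers: when the guest uses the MSR-based protocol, the
 * GHCB MSR holds an encoded request/response rather than a GPA.  The
 * GHCB_MSR_INFO_MASK bits identify the request and the remaining bits carry
 * request-specific data at the GHCB_MSR_* positions used below.
 */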
Tom Lendackyd3694662020-12-10 11:09:50 -06002148static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
2149 unsigned int pos)
2150{
2151 svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
2152 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
2153}
2154
2155static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
2156{
2157 return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
2158}
2159
Tom Lendacky1edc1452020-12-10 11:09:49 -06002160static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
2161{
2162 svm->vmcb->control.ghcb_gpa = value;
2163}
2164
Tom Lendacky291bd202020-12-10 11:09:47 -06002165static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
2166{
Tom Lendacky1edc1452020-12-10 11:09:49 -06002167 struct vmcb_control_area *control = &svm->vmcb->control;
Tom Lendackyd3694662020-12-10 11:09:50 -06002168 struct kvm_vcpu *vcpu = &svm->vcpu;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002169 u64 ghcb_info;
Tom Lendackyd3694662020-12-10 11:09:50 -06002170 int ret = 1;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002171
2172 ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;
2173
Tom Lendacky59e38b52020-12-10 11:09:52 -06002174 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
2175 control->ghcb_gpa);
2176
Tom Lendacky1edc1452020-12-10 11:09:49 -06002177 switch (ghcb_info) {
2178 case GHCB_MSR_SEV_INFO_REQ:
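		/*
		 * Respond with the supported GHCB protocol version range and
		 * the position of the encryption (C) bit, packed via
		 * GHCB_MSR_SEV_INFO().
		 */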
2179 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2180 GHCB_VERSION_MIN,
2181 sev_enc_bit));
2182 break;
Tom Lendackyd3694662020-12-10 11:09:50 -06002183 case GHCB_MSR_CPUID_REQ: {
2184 u64 cpuid_fn, cpuid_reg, cpuid_value;
2185
2186 cpuid_fn = get_ghcb_msr_bits(svm,
2187 GHCB_MSR_CPUID_FUNC_MASK,
2188 GHCB_MSR_CPUID_FUNC_POS);
2189
2190 /* Initialize the registers needed by the CPUID intercept */
2191 vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
2192 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2193
Paolo Bonzini63129752021-03-02 14:40:39 -05002194 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
Tom Lendackyd3694662020-12-10 11:09:50 -06002195 if (!ret) {
2196 ret = -EINVAL;
2197 break;
2198 }
2199
2200 cpuid_reg = get_ghcb_msr_bits(svm,
2201 GHCB_MSR_CPUID_REG_MASK,
2202 GHCB_MSR_CPUID_REG_POS);
2203 if (cpuid_reg == 0)
2204 cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
2205 else if (cpuid_reg == 1)
2206 cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
2207 else if (cpuid_reg == 2)
2208 cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
2209 else
2210 cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];
2211
2212 set_ghcb_msr_bits(svm, cpuid_value,
2213 GHCB_MSR_CPUID_VALUE_MASK,
2214 GHCB_MSR_CPUID_VALUE_POS);
2215
2216 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
2217 GHCB_MSR_INFO_MASK,
2218 GHCB_MSR_INFO_POS);
2219 break;
2220 }
Tom Lendackye1d71112020-12-10 11:09:51 -06002221 case GHCB_MSR_TERM_REQ: {
2222 u64 reason_set, reason_code;
2223
2224 reason_set = get_ghcb_msr_bits(svm,
2225 GHCB_MSR_TERM_REASON_SET_MASK,
2226 GHCB_MSR_TERM_REASON_SET_POS);
2227 reason_code = get_ghcb_msr_bits(svm,
2228 GHCB_MSR_TERM_REASON_MASK,
2229 GHCB_MSR_TERM_REASON_POS);
2230 pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
2231 reason_set, reason_code);
2232 fallthrough;
2233 }
Tom Lendacky1edc1452020-12-10 11:09:49 -06002234 default:
Tom Lendackyd3694662020-12-10 11:09:50 -06002235 ret = -EINVAL;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002236 }
2237
Tom Lendacky59e38b52020-12-10 11:09:52 -06002238 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
2239 control->ghcb_gpa, ret);
2240
Tom Lendackyd3694662020-12-10 11:09:50 -06002241 return ret;
Tom Lendacky291bd202020-12-10 11:09:47 -06002242}
2243
Paolo Bonzini63129752021-03-02 14:40:39 -05002244int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
Tom Lendacky291bd202020-12-10 11:09:47 -06002245{
Paolo Bonzini63129752021-03-02 14:40:39 -05002246 struct vcpu_svm *svm = to_svm(vcpu);
Tom Lendacky291bd202020-12-10 11:09:47 -06002247 struct vmcb_control_area *control = &svm->vmcb->control;
2248 u64 ghcb_gpa, exit_code;
2249 struct ghcb *ghcb;
2250 int ret;
2251
2252 /* Validate the GHCB */
2253 ghcb_gpa = control->ghcb_gpa;
2254 if (ghcb_gpa & GHCB_MSR_INFO_MASK)
2255 return sev_handle_vmgexit_msr_protocol(svm);
2256
2257 if (!ghcb_gpa) {
Paolo Bonzini63129752021-03-02 14:40:39 -05002258 vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
Tom Lendacky291bd202020-12-10 11:09:47 -06002259 return -EINVAL;
2260 }
2261
Paolo Bonzini63129752021-03-02 14:40:39 -05002262 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
Tom Lendacky291bd202020-12-10 11:09:47 -06002263 /* Unable to map GHCB from guest */
Paolo Bonzini63129752021-03-02 14:40:39 -05002264 vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
Tom Lendacky291bd202020-12-10 11:09:47 -06002265 ghcb_gpa);
2266 return -EINVAL;
2267 }
2268
2269 svm->ghcb = svm->ghcb_map.hva;
2270 ghcb = svm->ghcb_map.hva;
2271
Paolo Bonzini63129752021-03-02 14:40:39 -05002272 trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
Tom Lendackyd523ab6b2020-12-10 11:09:48 -06002273
Tom Lendacky291bd202020-12-10 11:09:47 -06002274 exit_code = ghcb_get_sw_exit_code(ghcb);
2275
2276 ret = sev_es_validate_vmgexit(svm);
2277 if (ret)
2278 return ret;
2279
2280 sev_es_sync_from_ghcb(svm);
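	/*
	 * sw_exit_info_1/2 are used to report status back to the guest; clear
	 * them so the default indicates success unless a handler below sets
	 * an error value.
	 */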
2281 ghcb_set_sw_exit_info_1(ghcb, 0);
2282 ghcb_set_sw_exit_info_2(ghcb, 0);
2283
2284 ret = -EINVAL;
2285 switch (exit_code) {
Tom Lendacky8f423a82020-12-10 11:09:53 -06002286 case SVM_VMGEXIT_MMIO_READ:
2287 if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
2288 break;
2289
Paolo Bonzini63129752021-03-02 14:40:39 -05002290 ret = kvm_sev_es_mmio_read(vcpu,
Tom Lendacky8f423a82020-12-10 11:09:53 -06002291 control->exit_info_1,
2292 control->exit_info_2,
2293 svm->ghcb_sa);
2294 break;
2295 case SVM_VMGEXIT_MMIO_WRITE:
2296 if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
2297 break;
2298
Paolo Bonzini63129752021-03-02 14:40:39 -05002299 ret = kvm_sev_es_mmio_write(vcpu,
Tom Lendacky8f423a82020-12-10 11:09:53 -06002300 control->exit_info_1,
2301 control->exit_info_2,
2302 svm->ghcb_sa);
2303 break;
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002304 case SVM_VMGEXIT_NMI_COMPLETE:
Paolo Bonzini63129752021-03-02 14:40:39 -05002305 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002306 break;
Tom Lendacky647daca2021-01-04 14:20:01 -06002307 case SVM_VMGEXIT_AP_HLT_LOOP:
Paolo Bonzini63129752021-03-02 14:40:39 -05002308 ret = kvm_emulate_ap_reset_hold(vcpu);
Tom Lendacky647daca2021-01-04 14:20:01 -06002309 break;
Tom Lendacky8640ca52020-12-15 12:44:07 -05002310 case SVM_VMGEXIT_AP_JUMP_TABLE: {
Paolo Bonzini63129752021-03-02 14:40:39 -05002311 struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
Tom Lendacky8640ca52020-12-15 12:44:07 -05002312
2313 switch (control->exit_info_1) {
2314 case 0:
2315 /* Set AP jump table address */
2316 sev->ap_jump_table = control->exit_info_2;
2317 break;
2318 case 1:
2319 /* Get AP jump table address */
2320 ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
2321 break;
2322 default:
2323 pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
2324 control->exit_info_1);
2325 ghcb_set_sw_exit_info_1(ghcb, 1);
2326 ghcb_set_sw_exit_info_2(ghcb,
2327 X86_TRAP_UD |
2328 SVM_EVTINJ_TYPE_EXEPT |
2329 SVM_EVTINJ_VALID);
2330 }
2331
2332 ret = 1;
2333 break;
2334 }
Tom Lendacky291bd202020-12-10 11:09:47 -06002335 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
Paolo Bonzini63129752021-03-02 14:40:39 -05002336 vcpu_unimpl(vcpu,
Tom Lendacky291bd202020-12-10 11:09:47 -06002337 "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
2338 control->exit_info_1, control->exit_info_2);
2339 break;
2340 default:
Paolo Bonzini63129752021-03-02 14:40:39 -05002341 ret = svm_invoke_exit_handler(vcpu, exit_code);
Tom Lendacky291bd202020-12-10 11:09:47 -06002342 }
2343
2344 return ret;
2345}
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002346
2347int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
2348{
2349 if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
2350 return -EINVAL;
2351
2352 return kvm_sev_es_string_io(&svm->vcpu, size, port,
2353 svm->ghcb_sa, svm->ghcb_sa_len, in);
2354}
Tom Lendacky376c6d22020-12-10 11:10:06 -06002355
2356void sev_es_init_vmcb(struct vcpu_svm *svm)
2357{
2358 struct kvm_vcpu *vcpu = &svm->vcpu;
2359
2360 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
2361 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
2362
2363 /*
2364	 * An SEV-ES guest requires a VMSA area that is separate from the
2365 * VMCB page. Do not include the encryption mask on the VMSA physical
2366 * address since hardware will access it using the guest key.
2367 */
2368 svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);
2369
2370 /* Can't intercept CR register access, HV can't modify CR registers */
2371 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
2372 svm_clr_intercept(svm, INTERCEPT_CR4_READ);
2373 svm_clr_intercept(svm, INTERCEPT_CR8_READ);
2374 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
2375 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
2376 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
2377
2378 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);
2379
2380 /* Track EFER/CR register changes */
2381 svm_set_intercept(svm, TRAP_EFER_WRITE);
2382 svm_set_intercept(svm, TRAP_CR0_WRITE);
2383 svm_set_intercept(svm, TRAP_CR4_WRITE);
2384 svm_set_intercept(svm, TRAP_CR8_WRITE);
2385
2386 /* No support for enable_vmware_backdoor */
2387 clr_exception_intercept(svm, GP_VECTOR);
2388
2389 /* Can't intercept XSETBV, HV can't modify XCR0 directly */
2390 svm_clr_intercept(svm, INTERCEPT_XSETBV);
2391
2392 /* Clear intercepts on selected MSRs */
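	/* (read = 1, write = 1 => the MSR accesses are passed through, not intercepted) */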
2393 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
2394 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
2395 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
2396 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
2397 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
2398 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
2399}
2400
2401void sev_es_create_vcpu(struct vcpu_svm *svm)
2402{
2403 /*
2404 * Set the GHCB MSR value as per the GHCB specification when creating
2405 * a vCPU for an SEV-ES guest.
2406 */
2407 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2408 GHCB_VERSION_MIN,
2409 sev_enc_bit));
2410}
Tom Lendacky86137772020-12-10 11:10:07 -06002411
Michael Rotha7fc06d2021-02-02 13:01:26 -06002412void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
Tom Lendacky86137772020-12-10 11:10:07 -06002413{
2414 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2415 struct vmcb_save_area *hostsa;
Tom Lendacky86137772020-12-10 11:10:07 -06002416
2417 /*
2418	 * For an SEV-ES guest, hardware restores the host state on VMEXIT, one
2419	 * step of which is a VMLOAD. Since hardware does not perform a VMSAVE
2420	 * on VMRUN, the host save area must be updated here.
2421 */
Sean Christopherson35a78312020-12-30 16:27:00 -08002422 vmsave(__sme_page_pa(sd->save_area));
Tom Lendacky86137772020-12-10 11:10:07 -06002423
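	/*
	 * The state-save portion of a VMCB page starts at offset 0x400, hence
	 * the offset applied to the host save_area page below.
	 */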
Tom Lendacky86137772020-12-10 11:10:07 -06002424 /* XCR0 is restored on VMEXIT, save the current host value */
2425 hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
2426 hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
2427
2428	/* PKRU is restored on VMEXIT, save the current host value */
2429 hostsa->pkru = read_pkru();
2430
2431	/* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
2432 hostsa->xss = host_xss;
2433}
2434
Tom Lendacky647daca2021-01-04 14:20:01 -06002435void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
2436{
2437 struct vcpu_svm *svm = to_svm(vcpu);
2438
2439 /* First SIPI: Use the values as initially set by the VMM */
2440 if (!svm->received_first_sipi) {
2441 svm->received_first_sipi = true;
2442 return;
2443 }
2444
2445 /*
2446 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
2447 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
2448 * non-zero value.
2449 */
Tom Lendackya3ba26e2021-04-09 09:38:42 -05002450 if (!svm->ghcb)
2451 return;
2452
Tom Lendacky647daca2021-01-04 14:20:01 -06002453 ghcb_set_sw_exit_info_2(svm->ghcb, 1);
2454}