// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/processor.h>
#include <linux/trace_events.h>
#include <asm/fpu/internal.h>

#include <asm/trapnr.h>

#include "x86.h"
#include "svm.h"
#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

static u8 sev_enc_bit;
static int sev_flush_asids(void);
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long sev_me_mask;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;

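/*
 * A userspace memory range registered for the guest via
 * svm_register_enc_region(); its pages stay pinned for the region's
 * lifetime and are tracked on the per-VM regions_list.
 */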
struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

static int sev_flush_asids(void)
{
	int ret, error = 0;

	/*
	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
	 * so it must be guarded.
	 */
	down_write(&sev_deactivate_lock);

	wbinvd_on_all_cpus();
	ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return ret;
}

static inline bool is_mirroring_enc_context(struct kvm *kvm)
{
	return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(int min_asid, int max_asid)
{
	int pos;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	pos = find_next_bit(sev_reclaim_asid_bitmap, max_sev_asid, min_asid);
	if (pos >= max_asid)
		return false;

	if (sev_flush_asids())
		return false;

	/* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
		   max_sev_asid);
	bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);

	return true;
}

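/*
 * Allocate a fresh ASID, recycling reclaimable ones if the pool is
 * exhausted.  The allocation bitmap is 0-based while ASIDs are 1-based,
 * hence the "pos + 1" on success; returns -EBUSY if nothing is free.
 */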
static int sev_asid_new(bool es_active)
{
	int pos, min_asid, max_asid;
	bool retry = true;

	mutex_lock(&sev_bitmap_lock);

	/*
	 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
	 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
	 */
	min_asid = es_active ? 0 : min_sev_asid - 1;
	max_asid = es_active ? min_sev_asid - 1 : max_sev_asid;
again:
	pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
	if (pos >= max_asid) {
		if (retry && __sev_recycle_asids(min_asid, max_asid)) {
			retry = false;
			goto again;
		}
		mutex_unlock(&sev_bitmap_lock);
		return -EBUSY;
	}

	__set_bit(pos, sev_asid_bitmap);

	mutex_unlock(&sev_bitmap_lock);

	return pos + 1;
}

static int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->asid;
}

static void sev_asid_free(int asid)
{
	struct svm_cpu_data *sd;
	int cpu, pos;

	mutex_lock(&sev_bitmap_lock);

	pos = asid - 1;
	__set_bit(pos, sev_reclaim_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu(svm_data, cpu);
		sd->sev_vmcbs[pos] = NULL;
	}

	mutex_unlock(&sev_bitmap_lock);
}

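/*
 * Tear down the firmware association for @handle: DEACTIVATE detaches the
 * ASID from the handle (guarded against the WBINVD/DF_FLUSH used in ASID
 * recycling), then DECOMMISSION releases the handle itself.
 */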
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_decommission decommission;
	struct sev_data_deactivate deactivate;

	if (!handle)
		return;

	deactivate.handle = handle;

	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(&deactivate, NULL);
	up_read(&sev_deactivate_lock);

	/* decommission handle */
	decommission.handle = handle;
	sev_guest_decommission(&decommission, NULL);
}

static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	bool es_active = argp->id == KVM_SEV_ES_INIT;
	int asid, ret;

	if (kvm->created_vcpus)
		return -EINVAL;

	ret = -EBUSY;
	if (unlikely(sev->active))
		return ret;

	asid = sev_asid_new(es_active);
	if (asid < 0)
		return ret;

	ret = sev_platform_init(&argp->error);
	if (ret)
		goto e_free;

	sev->active = true;
	sev->es_active = es_active;
	sev->asid = asid;
	INIT_LIST_HEAD(&sev->regions_list);

	return 0;

e_free:
	sev_asid_free(asid);
	return ret;
}

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
	struct sev_data_activate activate;
	int asid = sev_get_asid(kvm);
	int ret;

	/* activate ASID on the given handle */
	activate.handle = handle;
	activate.asid = asid;
	ret = sev_guest_activate(&activate, error);

	return ret;
}

static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = sev_issue_cmd_external_user(f.file, id, data, error);

	fdput(f);
	return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return __sev_issue_cmd(sev->fd, id, data, error);
}

static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_start start;
	struct kvm_sev_launch_start params;
	void *dh_blob, *session_blob;
	int *error = &argp->error;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	memset(&start, 0, sizeof(start));

	dh_blob = NULL;
	if (params.dh_uaddr) {
		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
		if (IS_ERR(dh_blob))
			return PTR_ERR(dh_blob);

		start.dh_cert_address = __sme_set(__pa(dh_blob));
		start.dh_cert_len = params.dh_len;
	}

	session_blob = NULL;
	if (params.session_uaddr) {
		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
		if (IS_ERR(session_blob)) {
			ret = PTR_ERR(session_blob);
			goto e_free_dh;
		}

		start.session_address = __sme_set(__pa(session_blob));
		start.session_len = params.session_len;
	}

	start.handle = params.handle;
	start.policy = params.policy;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start.handle, error);
	if (ret)
		goto e_free_session;

	/* return handle to userspace */
	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
		sev_unbind_asid(kvm, start.handle);
		ret = -EFAULT;
		goto e_free_session;
	}

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_blob);
e_free_dh:
	kfree(dh_blob);
	return ret;
}

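/*
 * Pin @ulen bytes of user memory at @uaddr and return the page array (or
 * an ERR_PTR).  The pinned pages are charged against RLIMIT_MEMLOCK, and
 * the caller must hold kvm->lock, as asserted below.
 */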
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	unsigned long npages, size;
	int npinned;
	unsigned long locked, lock_limit;
	struct page **pages;
	unsigned long first, last;
	int ret;

	lockdep_assert_held(&kvm->lock);

	if (ulen == 0 || uaddr + ulen < uaddr)
		return ERR_PTR(-EINVAL);

	/* Calculate number of pages. */
	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return ERR_PTR(-ENOMEM);
	}

	if (WARN_ON_ONCE(npages > INT_MAX))
		return ERR_PTR(-EINVAL);

	/* Avoid using vmalloc for smaller buffers. */
	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	else
		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Pin the user virtual address. */
	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);
		ret = -ENOMEM;
		goto err;
	}

	*n = npages;
	sev->pages_locked = locked;

	return pages;

err:
	if (npinned > 0)
		unpin_user_pages(pages, npinned);

	kvfree(pages);
	return ERR_PTR(ret);
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	unpin_user_pages(pages, npages);
	kvfree(pages);
	sev->pages_locked -= npages;
}

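/*
 * Flush the given pages out of the CPU caches, page by page.  This is a
 * no-op on parts that enumerate X86_FEATURE_SME_COHERENT, where no manual
 * flushing is needed.
 */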
static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
	uint8_t *page_virtual;
	unsigned long i;

	if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
	    pages == NULL)
		return;

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_atomic(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
}

static unsigned long get_num_contig_pages(unsigned long idx,
				struct page **inpages, unsigned long npages)
{
	unsigned long paddr, next_paddr;
	unsigned long i = idx + 1, pages = 1;

	/* find the number of contiguous pages starting from idx */
	paddr = __sme_page_pa(inpages[idx]);
	while (i < npages) {
		next_paddr = __sme_page_pa(inpages[i++]);
		if ((paddr + PAGE_SIZE) == next_paddr) {
			pages++;
			paddr = next_paddr;
			continue;
		}
		break;
	}

	return pages;
}

static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_launch_update_data params;
	struct sev_data_launch_update_data data;
	struct page **inpages;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	vaddr = params.uaddr;
	size = params.len;
	vaddr_end = vaddr + size;

	/* Lock the user memory. */
	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
	if (IS_ERR(inpages))
		return PTR_ERR(inpages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(inpages, npages);

	data.reserved = 0;
	data.handle = sev->handle;

	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
		int offset, len;

		/*
		 * If the user buffer is not page-aligned, calculate the offset
		 * within the page.
		 */
		offset = vaddr & (PAGE_SIZE - 1);

		/* Calculate the number of pages that can be encrypted in one go. */
		pages = get_num_contig_pages(i, inpages, npages);

		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

		data.len = len;
		data.address = __sme_page_pa(inpages[i]) + offset;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
		if (ret)
			goto e_unpin;

		size -= len;
		next_vaddr = vaddr + len;
	}

e_unpin:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(inpages[i]);
		mark_page_accessed(inpages[i]);
	}
	/* unlock the user pages */
	sev_unpin_memory(kvm, inpages, npages);
	return ret;
}

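/*
 * Copy the vCPU register state that KVM has been tracking into the VMSA
 * image so that LAUNCH_UPDATE_VMSA encrypts an up-to-date snapshot.
 */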
static int sev_es_sync_vmsa(struct vcpu_svm *svm)
{
	struct vmcb_save_area *save = &svm->vmcb->save;

	/* Check some debug related fields before encrypting the VMSA */
	if (svm->vcpu.guest_debug || (save->dr7 & ~DR7_FIXED_1))
		return -EINVAL;

	/* Sync registers */
	save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
	save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
	save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
	save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
	save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
	save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
	save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
#ifdef CONFIG_X86_64
	save->r8  = svm->vcpu.arch.regs[VCPU_REGS_R8];
	save->r9  = svm->vcpu.arch.regs[VCPU_REGS_R9];
	save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
	save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
	save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
	save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
	save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
	save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
#endif
	save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];

	/* Sync some non-GPR registers before encrypting */
	save->xcr0 = svm->vcpu.arch.xcr0;
	save->pkru = svm->vcpu.arch.pkru;
	save->xss  = svm->vcpu.arch.ia32_xss;

	/*
	 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
	 * the traditional VMSA that is part of the VMCB. Copy the
	 * traditional VMSA as it has been built so far (in prep
	 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
	 */
	memcpy(svm->vmsa, save, sizeof(*save));

	return 0;
}

static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_update_vmsa vmsa;
	struct kvm_vcpu *vcpu;
	int i, ret;

	if (!sev_es_guest(kvm))
		return -ENOTTY;

	vmsa.reserved = 0;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct vcpu_svm *svm = to_svm(vcpu);

		/* Perform some pre-encryption checks against the VMSA */
		ret = sev_es_sync_vmsa(svm);
		if (ret)
			return ret;

		/*
		 * The LAUNCH_UPDATE_VMSA command will perform in-place
		 * encryption of the VMSA memory content (i.e. it will write
		 * the same memory region with the guest's key), so invalidate
		 * it first.
		 */
		clflush_cache_range(svm->vmsa, PAGE_SIZE);

		vmsa.handle = sev->handle;
		vmsa.address = __sme_pa(svm->vmsa);
		vmsa.len = PAGE_SIZE;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
				    &argp->error);
		if (ret)
			return ret;

		svm->vcpu.arch.guest_state_protected = true;
	}

	return 0;
}

static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *measure = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_measure data;
	struct kvm_sev_launch_measure params;
	void __user *p = NULL;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, measure, sizeof(params)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE)
			return -EINVAL;

		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			return -ENOMEM;

		data.address = __psp_pa(blob);
		data.len = params.len;
	}

cmd:
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);

	/*
	 * If userspace was only querying the blob length, the firmware
	 * reply already carries the required length, so skip the error
	 * check and copy it back.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data.len;
	if (copy_to_user(measure, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
	return ret;
}

static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
}

static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_guest_status params;
	struct sev_data_guest_status data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	memset(&data, 0, sizeof(data));

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
	if (ret)
		return ret;

	params.policy = data.policy;
	params.state = data.state;
	params.handle = data.handle;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
		ret = -EFAULT;

	return ret;
}

static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
			       unsigned long dst, int size,
			       int *error, bool enc)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_dbg data;

	data.reserved = 0;
	data.handle = sev->handle;
	data.dst_addr = dst;
	data.src_addr = src;
	data.len = size;

	return sev_issue_cmd(kvm,
			     enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
			     &data, error);
}

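/*
 * The DBG_DECRYPT firmware command operates on 16-byte aligned, 16-byte
 * multiple regions, so round the source address down and the length up
 * before issuing it.
 */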
static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
			     unsigned long dst_paddr, int sz, int *err)
{
	int offset;

	/*
	 * It's safe to read more than was asked for; the caller must ensure
	 * that the destination has enough space.
	 */
	offset = src_paddr & 15;
	src_paddr = round_down(src_paddr, 16);
	sz = round_up(sz + offset, 16);

	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}

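/*
 * Decrypt guest memory into userspace, bouncing through a scratch page
 * whenever the source, destination or size is not 16-byte aligned.
 */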
static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
				  unsigned long __user dst_uaddr,
				  unsigned long dst_paddr,
				  int size, int *err)
{
	struct page *tpage = NULL;
	int ret, offset;

	/* if inputs are not 16-byte aligned then use an intermediate buffer */
	if (!IS_ALIGNED(dst_paddr, 16) ||
	    !IS_ALIGNED(paddr,     16) ||
	    !IS_ALIGNED(size,      16)) {
		tpage = (void *)alloc_page(GFP_KERNEL);
		if (!tpage)
			return -ENOMEM;

		dst_paddr = __sme_page_pa(tpage);
	}

	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
	if (ret)
		goto e_free;

	if (tpage) {
		offset = paddr & 15;
		if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
				 page_address(tpage) + offset, size))
			ret = -EFAULT;
	}

e_free:
	if (tpage)
		__free_page(tpage);

	return ret;
}

static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
				  unsigned long __user vaddr,
				  unsigned long dst_paddr,
				  unsigned long __user dst_vaddr,
				  int size, int *error)
{
	struct page *src_tpage = NULL;
	struct page *dst_tpage = NULL;
	int ret, len = size;

	/* If the source buffer is not aligned then use an intermediate buffer */
	if (!IS_ALIGNED(vaddr, 16)) {
		src_tpage = alloc_page(GFP_KERNEL);
		if (!src_tpage)
			return -ENOMEM;

		if (copy_from_user(page_address(src_tpage),
				   (void __user *)(uintptr_t)vaddr, size)) {
			__free_page(src_tpage);
			return -EFAULT;
		}

		paddr = __sme_page_pa(src_tpage);
	}

	/*
	 * If the destination buffer or length is not aligned then do a
	 * read-modify-write:
	 * - decrypt the destination into an intermediate buffer
	 * - copy the source buffer into the intermediate buffer
	 * - use the intermediate buffer as the source buffer
	 */
	if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
		int dst_offset;

		dst_tpage = alloc_page(GFP_KERNEL);
		if (!dst_tpage) {
			ret = -ENOMEM;
			goto e_free;
		}

		ret = __sev_dbg_decrypt(kvm, dst_paddr,
					__sme_page_pa(dst_tpage), size, error);
		if (ret)
			goto e_free;

		/*
		 * If the source is a kernel buffer then use memcpy(),
		 * otherwise copy_from_user().
		 */
		dst_offset = dst_paddr & 15;

		if (src_tpage)
			memcpy(page_address(dst_tpage) + dst_offset,
			       page_address(src_tpage), size);
		else {
			if (copy_from_user(page_address(dst_tpage) + dst_offset,
					   (void __user *)(uintptr_t)vaddr, size)) {
				ret = -EFAULT;
				goto e_free;
			}
		}

		paddr = __sme_page_pa(dst_tpage);
		dst_paddr = round_down(dst_paddr, 16);
		len = round_up(size, 16);
	}

	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
	if (src_tpage)
		__free_page(src_tpage);
	if (dst_tpage)
		__free_page(dst_tpage);
	return ret;
}

static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
	unsigned long vaddr, vaddr_end, next_vaddr;
	unsigned long dst_vaddr;
	struct page **src_p, **dst_p;
	struct kvm_sev_dbg debug;
	unsigned long n;
	unsigned int size;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
		return -EFAULT;

	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
		return -EINVAL;
	if (!debug.dst_uaddr)
		return -EINVAL;

	vaddr = debug.src_uaddr;
	size = debug.len;
	vaddr_end = vaddr + size;
	dst_vaddr = debug.dst_uaddr;

	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
		int len, s_off, d_off;

		/* lock userspace source and destination page */
		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
		if (IS_ERR(src_p))
			return PTR_ERR(src_p);

		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
		if (IS_ERR(dst_p)) {
			sev_unpin_memory(kvm, src_p, n);
			return PTR_ERR(dst_p);
		}

		/*
		 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
		 * the pages; flush the destination too so that future accesses do not
		 * see stale data.
		 */
		sev_clflush_pages(src_p, 1);
		sev_clflush_pages(dst_p, 1);

		/*
		 * Since the user buffer may not be page aligned, calculate the
		 * offset within the page.
		 */
		s_off = vaddr & ~PAGE_MASK;
		d_off = dst_vaddr & ~PAGE_MASK;
		len = min_t(size_t, (PAGE_SIZE - s_off), size);

		if (dec)
			ret = __sev_dbg_decrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     dst_vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     len, &argp->error);
		else
			ret = __sev_dbg_encrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     dst_vaddr,
						     len, &argp->error);

		sev_unpin_memory(kvm, src_p, n);
		sev_unpin_memory(kvm, dst_p, n);

		if (ret)
			goto err;

		next_vaddr = vaddr + len;
		dst_vaddr = dst_vaddr + len;
		size -= len;
	}
err:
	return ret;
}

static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_secret data;
	struct kvm_sev_launch_secret params;
	struct page **pages;
	void *blob, *hdr;
	unsigned long n, i;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(pages, n);

	/*
	 * The secret must be copied into a contiguous memory region; verify
	 * that the userspace memory pages are contiguous before issuing the
	 * command.
	 */
	if (get_num_contig_pages(0, pages, n) != n) {
		ret = -EINVAL;
		goto e_unpin_memory;
	}

	memset(&data, 0, sizeof(data));

	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	data.guest_address = __sme_page_pa(pages[0]) + offset;
	data.guest_len = params.guest_len;

	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		goto e_unpin_memory;
	}

	data.trans_address = __psp_pa(blob);
	data.trans_len = params.trans_len;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr)) {
		ret = PTR_ERR(hdr);
		goto e_free_blob;
	}
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);

	kfree(hdr);

e_free_blob:
	kfree(blob);
e_unpin_memory:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < n; i++) {
		set_page_dirty_lock(pages[i]);
		mark_page_accessed(pages[i]);
	}
	sev_unpin_memory(kvm, pages, n);
	return ret;
}

static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *report = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_attestation_report data;
	struct kvm_sev_attestation_report params;
	void __user *p;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE)
			return -EINVAL;

		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			return -ENOMEM;

		data.address = __psp_pa(blob);
		data.len = params.len;
		memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
	}
cmd:
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
	/*
	 * If userspace was only querying the blob length, the firmware
	 * reply already carries the required length, so skip the error
	 * check and copy it back.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data.len;
	if (copy_to_user(report, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
	return ret;
}

/* Userspace wants to query session length. */
static int
__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
				      struct kvm_sev_send_start *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start data;
	int ret;

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
	if (ret < 0)
		return ret;

	params->session_len = data.session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

	return ret;
}

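/*
 * SEND_START begins guest migration: the certificate blobs of the
 * migration target are handed to the firmware, which returns the session
 * blob that userspace forwards to the target platform.
 */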
static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start data;
	struct kvm_sev_send_start params;
	void *amd_certs, *session_data;
	void *pdh_cert, *plat_certs;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_start)))
		return -EFAULT;

	/* if session_len is zero, userspace wants to query the session length */
	if (!params.session_len)
		return __sev_send_start_query_session_length(kvm, argp,
							     &params);

	/* some sanity checks */
	if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
	    !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
		return -EINVAL;

	/* allocate the memory to hold the session data blob */
	session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
	if (!session_data)
		return -ENOMEM;

	/* copy the certificate blobs from userspace */
	pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
				      params.pdh_cert_len);
	if (IS_ERR(pdh_cert)) {
		ret = PTR_ERR(pdh_cert);
		goto e_free_session;
	}

	plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
					params.plat_certs_len);
	if (IS_ERR(plat_certs)) {
		ret = PTR_ERR(plat_certs);
		goto e_free_pdh;
	}

	amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
				       params.amd_certs_len);
	if (IS_ERR(amd_certs)) {
		ret = PTR_ERR(amd_certs);
		goto e_free_plat_cert;
	}

	/* populate the FW SEND_START fields with the system physical addresses */
	memset(&data, 0, sizeof(data));
	data.pdh_cert_address = __psp_pa(pdh_cert);
	data.pdh_cert_len = params.pdh_cert_len;
	data.plat_certs_address = __psp_pa(plat_certs);
	data.plat_certs_len = params.plat_certs_len;
	data.amd_certs_address = __psp_pa(amd_certs);
	data.amd_certs_len = params.amd_certs_len;
	data.session_address = __psp_pa(session_data);
	data.session_len = params.session_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

	if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
				 session_data, params.session_len)) {
		ret = -EFAULT;
		goto e_free_amd_cert;
	}

	params.policy = data.policy;
	params.session_len = data.session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
			 sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

e_free_amd_cert:
	kfree(amd_certs);
e_free_plat_cert:
	kfree(plat_certs);
e_free_pdh:
	kfree(pdh_cert);
e_free_session:
	kfree(session_data);
	return ret;
}

/* Userspace wants to query either header or trans length. */
static int
__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
				     struct kvm_sev_send_update_data *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_update_data data;
	int ret;

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
	if (ret < 0)
		return ret;

	params->hdr_len = data.hdr_len;
	params->trans_len = data.trans_len;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_update_data)))
		ret = -EFAULT;

	return ret;
}

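/*
 * SEND_UPDATE_DATA re-encrypts one pinned guest page with the transport
 * key; the resulting packet header and transport buffer are bounced
 * through kernel allocations and then copied back to userspace.
 */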
static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_update_data data;
	struct kvm_sev_send_update_data params;
	void *hdr, *trans_data;
	struct page **guest_page;
	unsigned long n;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_update_data)))
		return -EFAULT;

	/* userspace wants to query either header or trans length */
	if (!params.trans_len || !params.hdr_len)
		return __sev_send_update_data_query_lengths(kvm, argp, &params);

	if (!params.trans_uaddr || !params.guest_uaddr ||
	    !params.guest_len || !params.hdr_uaddr)
		return -EINVAL;

	/* Check if we are crossing the page boundary */
	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	if ((params.guest_len + offset > PAGE_SIZE))
		return -EINVAL;

	/* Pin guest memory */
	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
				    PAGE_SIZE, &n, 0);
	if (IS_ERR(guest_page))
		return PTR_ERR(guest_page);

	/* allocate memory for header and transport buffer */
	ret = -ENOMEM;
	hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
	if (!hdr)
		goto e_unpin;

	trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
	if (!trans_data)
		goto e_free_hdr;

	memset(&data, 0, sizeof(data));
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;
	data.trans_address = __psp_pa(trans_data);
	data.trans_len = params.trans_len;

	/* The SEND_UPDATE_DATA command requires C-bit to be always set. */
	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
	data.guest_address |= sev_me_mask;
	data.guest_len = params.guest_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);

	if (ret)
		goto e_free_trans_data;

	/* copy transport buffer to user space */
	if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
			 trans_data, params.trans_len)) {
		ret = -EFAULT;
		goto e_free_trans_data;
	}

	/* Copy packet header to userspace. */
	if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
			 params.hdr_len))
		ret = -EFAULT;

e_free_trans_data:
	kfree(trans_data);
e_free_hdr:
	kfree(hdr);
e_unpin:
	sev_unpin_memory(kvm, guest_page, n);

	return ret;
}

static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
}

static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_cancel data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
}

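/*
 * RECEIVE_START is the target-side counterpart of SEND_START: it creates
 * the memory encryption context from the sender's PDH certificate and
 * session blob, then binds an ASID to the new handle.
 */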
static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_receive_start start;
	struct kvm_sev_receive_start params;
	int *error = &argp->error;
	void *session_data;
	void *pdh_data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	/* Get parameter from the userspace */
	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_receive_start)))
		return -EFAULT;

	/* some sanity checks */
	if (!params.pdh_uaddr || !params.pdh_len ||
	    !params.session_uaddr || !params.session_len)
		return -EINVAL;

	pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
	if (IS_ERR(pdh_data))
		return PTR_ERR(pdh_data);

	session_data = psp_copy_user_blob(params.session_uaddr,
					  params.session_len);
	if (IS_ERR(session_data)) {
		ret = PTR_ERR(session_data);
		goto e_free_pdh;
	}

	memset(&start, 0, sizeof(start));
	start.handle = params.handle;
	start.policy = params.policy;
	start.pdh_cert_address = __psp_pa(pdh_data);
	start.pdh_cert_len = params.pdh_len;
	start.session_address = __psp_pa(session_data);
	start.session_len = params.session_len;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
			      error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start.handle, error);
	if (ret)
		goto e_free_session;

	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data,
			 &params, sizeof(struct kvm_sev_receive_start))) {
		ret = -EFAULT;
		sev_unbind_asid(kvm, start.handle);
		goto e_free_session;
	}

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_data);
e_free_pdh:
	kfree(pdh_data);

	return ret;
}

static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_receive_update_data params;
	struct sev_data_receive_update_data data;
	void *hdr = NULL, *trans = NULL;
	struct page **guest_page;
	unsigned long n;
	int ret, offset;

	if (!sev_guest(kvm))
		return -EINVAL;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_receive_update_data)))
		return -EFAULT;

	if (!params.hdr_uaddr || !params.hdr_len ||
	    !params.guest_uaddr || !params.guest_len ||
	    !params.trans_uaddr || !params.trans_len)
		return -EINVAL;

	/* Check if we are crossing the page boundary */
	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	if ((params.guest_len + offset > PAGE_SIZE))
		return -EINVAL;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto e_free_hdr;
	}

	memset(&data, 0, sizeof(data));
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;
	data.trans_address = __psp_pa(trans);
	data.trans_len = params.trans_len;

	/* Pin guest memory */
	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
				    PAGE_SIZE, &n, 0);
	if (IS_ERR(guest_page)) {
		ret = PTR_ERR(guest_page);
		goto e_free_trans;
	}

	/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
	data.guest_address |= sev_me_mask;
	data.guest_len = params.guest_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
			    &argp->error);

	sev_unpin_memory(kvm, guest_page, n);

e_free_trans:
	kfree(trans);
e_free_hdr:
	kfree(hdr);

	return ret;
}

static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_receive_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
}

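/*
 * Top-level dispatcher for the memory encryption ioctl; all SEV commands
 * run under kvm->lock, and VMs that merely mirror another VM's encryption
 * context are rejected, as the context owner handles those operations.
 */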
Joerg Roedeleaf78262020-03-24 10:41:54 +01001442int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
1443{
1444 struct kvm_sev_cmd sev_cmd;
1445 int r;
1446
Tom Lendacky916391a2020-12-10 11:09:38 -06001447 if (!svm_sev_enabled() || !sev)
Joerg Roedeleaf78262020-03-24 10:41:54 +01001448 return -ENOTTY;
1449
1450 if (!argp)
1451 return 0;
1452
1453 if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
1454 return -EFAULT;
1455
1456 mutex_lock(&kvm->lock);
1457
Nathan Tempelman54526d12021-04-08 22:32:14 +00001458 /* enc_context_owner handles all memory enc operations */
1459 if (is_mirroring_enc_context(kvm)) {
1460 r = -EINVAL;
1461 goto out;
1462 }
1463
Joerg Roedeleaf78262020-03-24 10:41:54 +01001464 switch (sev_cmd.id) {
Sean Christopherson9fa15212021-03-30 20:19:35 -07001465 case KVM_SEV_ES_INIT:
1466 if (!sev_es) {
1467 r = -ENOTTY;
1468 goto out;
1469 }
1470 fallthrough;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001471 case KVM_SEV_INIT:
1472 r = sev_guest_init(kvm, &sev_cmd);
1473 break;
1474 case KVM_SEV_LAUNCH_START:
1475 r = sev_launch_start(kvm, &sev_cmd);
1476 break;
1477 case KVM_SEV_LAUNCH_UPDATE_DATA:
1478 r = sev_launch_update_data(kvm, &sev_cmd);
1479 break;
Tom Lendackyad731092020-12-10 11:10:09 -06001480 case KVM_SEV_LAUNCH_UPDATE_VMSA:
1481 r = sev_launch_update_vmsa(kvm, &sev_cmd);
1482 break;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001483 case KVM_SEV_LAUNCH_MEASURE:
1484 r = sev_launch_measure(kvm, &sev_cmd);
1485 break;
1486 case KVM_SEV_LAUNCH_FINISH:
1487 r = sev_launch_finish(kvm, &sev_cmd);
1488 break;
1489 case KVM_SEV_GUEST_STATUS:
1490 r = sev_guest_status(kvm, &sev_cmd);
1491 break;
1492 case KVM_SEV_DBG_DECRYPT:
1493 r = sev_dbg_crypt(kvm, &sev_cmd, true);
1494 break;
1495 case KVM_SEV_DBG_ENCRYPT:
1496 r = sev_dbg_crypt(kvm, &sev_cmd, false);
1497 break;
1498 case KVM_SEV_LAUNCH_SECRET:
1499 r = sev_launch_secret(kvm, &sev_cmd);
1500 break;
Brijesh Singh2c07ded2021-01-04 09:17:49 -06001501 case KVM_SEV_GET_ATTESTATION_REPORT:
1502 r = sev_get_attestation_report(kvm, &sev_cmd);
1503 break;
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001504 case KVM_SEV_SEND_START:
1505 r = sev_send_start(kvm, &sev_cmd);
1506 break;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001507 case KVM_SEV_SEND_UPDATE_DATA:
1508 r = sev_send_update_data(kvm, &sev_cmd);
1509 break;
Brijesh Singhfddecf62021-04-15 15:54:15 +00001510 case KVM_SEV_SEND_FINISH:
1511 r = sev_send_finish(kvm, &sev_cmd);
1512 break;
Steve Rutherford5569e2e2021-04-20 05:01:20 -04001513 case KVM_SEV_SEND_CANCEL:
1514 r = sev_send_cancel(kvm, &sev_cmd);
1515 break;
Brijesh Singhaf43cbb2021-04-15 15:54:50 +00001516 case KVM_SEV_RECEIVE_START:
1517 r = sev_receive_start(kvm, &sev_cmd);
1518 break;
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001519 case KVM_SEV_RECEIVE_UPDATE_DATA:
1520 r = sev_receive_update_data(kvm, &sev_cmd);
1521 break;
Brijesh Singh6a443de2021-04-15 15:55:40 +00001522 case KVM_SEV_RECEIVE_FINISH:
1523 r = sev_receive_finish(kvm, &sev_cmd);
1524 break;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001525 default:
1526 r = -EINVAL;
1527 goto out;
1528 }
1529
1530 if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
1531 r = -EFAULT;
1532
1533out:
1534 mutex_unlock(&kvm->lock);
1535 return r;
1536}
1537
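/*
 * Register a userspace memory region for use by an SEV guest
 * (KVM_MEMORY_ENCRYPT_REG_REGION). The pages are pinned for the lifetime
 * of the region since the guest encrypts them with its own key and the
 * host must not migrate or swap them. Illustrative userspace sketch:
 *
 *	struct kvm_enc_region region = {
 *		.addr = (__u64)guest_mem,
 *		.size = guest_mem_size,
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &region);
 */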
1538int svm_register_enc_region(struct kvm *kvm,
1539 struct kvm_enc_region *range)
1540{
1541 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1542 struct enc_region *region;
1543 int ret = 0;
1544
1545 if (!sev_guest(kvm))
1546 return -ENOTTY;
1547
Nathan Tempelman54526d12021-04-08 22:32:14 +00001548	/* If kvm is mirroring the encryption context, it isn't responsible for it */
1549 if (is_mirroring_enc_context(kvm))
1550 return -EINVAL;
1551
Joerg Roedeleaf78262020-03-24 10:41:54 +01001552 if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
1553 return -EINVAL;
1554
1555 region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
1556 if (!region)
1557 return -ENOMEM;
1558
Peter Gonda19a23da2021-01-27 08:15:24 -08001559 mutex_lock(&kvm->lock);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001560 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
Paolo Bonzinia8d908b2020-06-23 05:12:24 -04001561 if (IS_ERR(region->pages)) {
1562 ret = PTR_ERR(region->pages);
Peter Gonda19a23da2021-01-27 08:15:24 -08001563 mutex_unlock(&kvm->lock);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001564 goto e_free;
1565 }
1566
Peter Gonda19a23da2021-01-27 08:15:24 -08001567 region->uaddr = range->addr;
1568 region->size = range->size;
1569
1570 list_add_tail(&region->list, &sev->regions_list);
1571 mutex_unlock(&kvm->lock);
1572
Joerg Roedeleaf78262020-03-24 10:41:54 +01001573 /*
1574 * The guest may change the memory encryption attribute from C=0 -> C=1
	1575	 * or vice versa for this memory range. Let's make sure caches are
	1576	 * flushed to ensure that guest data gets written into memory with
	1577	 * the correct C-bit.
1578 */
1579 sev_clflush_pages(region->pages, region->npages);
1580
Joerg Roedeleaf78262020-03-24 10:41:54 +01001581 return ret;
1582
1583e_free:
1584 kfree(region);
1585 return ret;
1586}
1587
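/* Find the registered region that exactly matches @range (uaddr and size). */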
1588static struct enc_region *
1589find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
1590{
1591 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1592 struct list_head *head = &sev->regions_list;
1593 struct enc_region *i;
1594
1595 list_for_each_entry(i, head, list) {
1596 if (i->uaddr == range->addr &&
1597 i->size == range->size)
1598 return i;
1599 }
1600
1601 return NULL;
1602}
1603
1604static void __unregister_enc_region_locked(struct kvm *kvm,
1605 struct enc_region *region)
1606{
1607 sev_unpin_memory(kvm, region->pages, region->npages);
1608 list_del(&region->list);
1609 kfree(region);
1610}
1611
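/*
 * Unpin and free a previously registered region
 * (KVM_MEMORY_ENCRYPT_UNREG_REGION), flushing guest-tagged cache lines
 * before the pages are released back to the system.
 */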
1612int svm_unregister_enc_region(struct kvm *kvm,
1613 struct kvm_enc_region *range)
1614{
1615 struct enc_region *region;
1616 int ret;
1617
Nathan Tempelman54526d12021-04-08 22:32:14 +00001618	/* If kvm is mirroring the encryption context, it isn't responsible for it */
1619 if (is_mirroring_enc_context(kvm))
1620 return -EINVAL;
1621
Joerg Roedeleaf78262020-03-24 10:41:54 +01001622 mutex_lock(&kvm->lock);
1623
1624 if (!sev_guest(kvm)) {
1625 ret = -ENOTTY;
1626 goto failed;
1627 }
1628
1629 region = find_enc_region(kvm, range);
1630 if (!region) {
1631 ret = -EINVAL;
1632 goto failed;
1633 }
1634
1635 /*
1636 * Ensure that all guest tagged cache entries are flushed before
1637 * releasing the pages back to the system for use. CLFLUSH will
1638 * not do this, so issue a WBINVD.
1639 */
1640 wbinvd_on_all_cpus();
1641
1642 __unregister_enc_region_locked(kvm, region);
1643
1644 mutex_unlock(&kvm->lock);
1645 return 0;
1646
1647failed:
1648 mutex_unlock(&kvm->lock);
1649 return ret;
1650}
1651
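/*
 * Mirror the SEV encryption context (ASID) of the VM identified by
 * source_fd into @kvm. The mirror holds a reference on the source VM so
 * the ASID cannot be freed underneath it; teardown of the mirror only
 * drops that reference (see sev_vm_destroy()).
 */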
Nathan Tempelman54526d12021-04-08 22:32:14 +00001652int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
1653{
1654 struct file *source_kvm_file;
1655 struct kvm *source_kvm;
1656 struct kvm_sev_info *mirror_sev;
1657 unsigned int asid;
1658 int ret;
1659
1660 source_kvm_file = fget(source_fd);
1661 if (!file_is_kvm(source_kvm_file)) {
1662 ret = -EBADF;
1663 goto e_source_put;
1664 }
1665
1666 source_kvm = source_kvm_file->private_data;
1667 mutex_lock(&source_kvm->lock);
1668
1669 if (!sev_guest(source_kvm)) {
1670 ret = -EINVAL;
1671 goto e_source_unlock;
1672 }
1673
1674 /* Mirrors of mirrors should work, but let's not get silly */
1675 if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
1676 ret = -EINVAL;
1677 goto e_source_unlock;
1678 }
1679
1680 asid = to_kvm_svm(source_kvm)->sev_info.asid;
1681
1682 /*
1683 * The mirror kvm holds an enc_context_owner ref so its asid can't
1684 * disappear until we're done with it
1685 */
1686 kvm_get_kvm(source_kvm);
1687
1688 fput(source_kvm_file);
1689 mutex_unlock(&source_kvm->lock);
1690 mutex_lock(&kvm->lock);
1691
1692 if (sev_guest(kvm)) {
1693 ret = -EINVAL;
1694 goto e_mirror_unlock;
1695 }
1696
1697 /* Set enc_context_owner and copy its encryption context over */
1698 mirror_sev = &to_kvm_svm(kvm)->sev_info;
1699 mirror_sev->enc_context_owner = source_kvm;
1700 mirror_sev->asid = asid;
1701 mirror_sev->active = true;
1702
1703 mutex_unlock(&kvm->lock);
1704 return 0;
1705
1706e_mirror_unlock:
1707 mutex_unlock(&kvm->lock);
1708 kvm_put_kvm(source_kvm);
1709 return ret;
1710e_source_unlock:
1711 mutex_unlock(&source_kvm->lock);
1712e_source_put:
1713 fput(source_kvm_file);
1714 return ret;
1715}
1716
Joerg Roedeleaf78262020-03-24 10:41:54 +01001717void sev_vm_destroy(struct kvm *kvm)
1718{
1719 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1720 struct list_head *head = &sev->regions_list;
1721 struct list_head *pos, *q;
1722
1723 if (!sev_guest(kvm))
1724 return;
1725
Nathan Tempelman54526d12021-04-08 22:32:14 +00001726	/* If this is a mirror_kvm, release the enc_context_owner and skip SEV cleanup */
1727 if (is_mirroring_enc_context(kvm)) {
1728 kvm_put_kvm(sev->enc_context_owner);
1729 return;
1730 }
1731
Joerg Roedeleaf78262020-03-24 10:41:54 +01001732 mutex_lock(&kvm->lock);
1733
1734 /*
1735 * Ensure that all guest tagged cache entries are flushed before
1736 * releasing the pages back to the system for use. CLFLUSH will
1737 * not do this, so issue a WBINVD.
1738 */
1739 wbinvd_on_all_cpus();
1740
1741 /*
	1742	 * If userspace was terminated before unregistering the memory regions,
	1743	 * then let's unpin all the registered memory.
1744 */
1745 if (!list_empty(head)) {
1746 list_for_each_safe(pos, q, head) {
1747 __unregister_enc_region_locked(kvm,
1748 list_entry(pos, struct enc_region, list));
David Rientjes7be74942020-08-25 12:56:28 -07001749 cond_resched();
Joerg Roedeleaf78262020-03-24 10:41:54 +01001750 }
1751 }
1752
1753 mutex_unlock(&kvm->lock);
1754
1755 sev_unbind_asid(kvm, sev->handle);
1756 sev_asid_free(sev->asid);
1757}
1758
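/*
 * Probe SEV/SEV-ES support from CPUID 0x8000001F: EBX[5:0] gives the
 * C-bit position, ECX the number of simultaneously encrypted guests
 * (maximum ASID), and EDX the minimum ASID usable by plain SEV guests;
 * ASIDs below that are reserved for SEV-ES.
 */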
Tom Lendacky916391a2020-12-10 11:09:38 -06001759void __init sev_hardware_setup(void)
Joerg Roedeleaf78262020-03-24 10:41:54 +01001760{
Tom Lendacky916391a2020-12-10 11:09:38 -06001761 unsigned int eax, ebx, ecx, edx;
1762 bool sev_es_supported = false;
1763 bool sev_supported = false;
1764
1765 /* Does the CPU support SEV? */
1766 if (!boot_cpu_has(X86_FEATURE_SEV))
1767 goto out;
1768
1769 /* Retrieve SEV CPUID information */
1770 cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
1771
Tom Lendacky1edc1452020-12-10 11:09:49 -06001772 /* Set encryption bit location for SEV-ES guests */
1773 sev_enc_bit = ebx & 0x3f;
1774
Joerg Roedeleaf78262020-03-24 10:41:54 +01001775 /* Maximum number of encrypted guests supported simultaneously */
Tom Lendacky916391a2020-12-10 11:09:38 -06001776 max_sev_asid = ecx;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001777
Paolo Bonzini9ef15302020-04-13 03:20:06 -04001778 if (!svm_sev_enabled())
Tom Lendacky916391a2020-12-10 11:09:38 -06001779 goto out;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001780
1781 /* Minimum ASID value that should be used for SEV guest */
Tom Lendacky916391a2020-12-10 11:09:38 -06001782 min_sev_asid = edx;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001783 sev_me_mask = 1UL << (ebx & 0x3f);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001784
1785 /* Initialize SEV ASID bitmaps */
1786 sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1787 if (!sev_asid_bitmap)
Tom Lendacky916391a2020-12-10 11:09:38 -06001788 goto out;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001789
1790 sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1791 if (!sev_reclaim_asid_bitmap)
Tom Lendacky916391a2020-12-10 11:09:38 -06001792 goto out;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001793
Tom Lendacky916391a2020-12-10 11:09:38 -06001794 pr_info("SEV supported: %u ASIDs\n", max_sev_asid - min_sev_asid + 1);
1795 sev_supported = true;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001796
Tom Lendacky916391a2020-12-10 11:09:38 -06001797 /* SEV-ES support requested? */
1798 if (!sev_es)
1799 goto out;
1800
1801 /* Does the CPU support SEV-ES? */
1802 if (!boot_cpu_has(X86_FEATURE_SEV_ES))
1803 goto out;
1804
1805 /* Has the system been allocated ASIDs for SEV-ES? */
1806 if (min_sev_asid == 1)
1807 goto out;
1808
1809 pr_info("SEV-ES supported: %u ASIDs\n", min_sev_asid - 1);
1810 sev_es_supported = true;
1811
1812out:
1813 sev = sev_supported;
1814 sev_es = sev_es_supported;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001815}
1816
1817void sev_hardware_teardown(void)
1818{
Paolo Bonzini9ef15302020-04-13 03:20:06 -04001819 if (!svm_sev_enabled())
1820 return;
1821
Joerg Roedeleaf78262020-03-24 10:41:54 +01001822 bitmap_free(sev_asid_bitmap);
1823 bitmap_free(sev_reclaim_asid_bitmap);
1824
1825 sev_flush_asids();
1826}
1827
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06001828/*
1829 * Pages used by hardware to hold guest encrypted state must be flushed before
1830 * returning them to the system.
1831 */
1832static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
1833 unsigned long len)
1834{
1835 /*
	1836	 * If hardware-enforced cache coherency for encrypted mappings of the
1837 * same physical page is supported, nothing to do.
1838 */
1839 if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
1840 return;
1841
1842 /*
1843 * If the VM Page Flush MSR is supported, use it to flush the page
1844 * (using the page virtual address and the guest ASID).
1845 */
1846 if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
1847 struct kvm_sev_info *sev;
1848 unsigned long va_start;
1849 u64 start, stop;
1850
1851 /* Align start and stop to page boundaries. */
1852 va_start = (unsigned long)va;
1853 start = (u64)va_start & PAGE_MASK;
1854 stop = PAGE_ALIGN((u64)va_start + len);
1855
1856 if (start < stop) {
1857 sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
1858
1859 while (start < stop) {
1860 wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
1861 start | sev->asid);
1862
1863 start += PAGE_SIZE;
1864 }
1865
1866 return;
1867 }
1868
1869 WARN(1, "Address overflow, using WBINVD\n");
1870 }
1871
1872 /*
1873 * Hardware should always have one of the above features,
1874 * but if not, use WBINVD and issue a warning.
1875 */
1876 WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
1877 wbinvd_on_all_cpus();
1878}
1879
1880void sev_free_vcpu(struct kvm_vcpu *vcpu)
1881{
1882 struct vcpu_svm *svm;
1883
1884 if (!sev_es_guest(vcpu->kvm))
1885 return;
1886
1887 svm = to_svm(vcpu);
1888
1889 if (vcpu->arch.guest_state_protected)
1890 sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
1891 __free_page(virt_to_page(svm->vmsa));
Tom Lendacky8f423a82020-12-10 11:09:53 -06001892
1893 if (svm->ghcb_sa_free)
1894 kfree(svm->ghcb_sa);
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06001895}
1896
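/* Dump the guest-controlled GHCB on validation failure, gated on dump_invalid_vmcb. */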
Tom Lendacky291bd202020-12-10 11:09:47 -06001897static void dump_ghcb(struct vcpu_svm *svm)
1898{
1899 struct ghcb *ghcb = svm->ghcb;
1900 unsigned int nbits;
1901
1902 /* Re-use the dump_invalid_vmcb module parameter */
1903 if (!dump_invalid_vmcb) {
1904 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
1905 return;
1906 }
1907
1908 nbits = sizeof(ghcb->save.valid_bitmap) * 8;
1909
1910 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
1911 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
1912 ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
1913 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
1914 ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
1915 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
1916 ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
1917 pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
1918 ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
1919 pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
1920}
1921
1922static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
1923{
1924 struct kvm_vcpu *vcpu = &svm->vcpu;
1925 struct ghcb *ghcb = svm->ghcb;
1926
1927 /*
1928 * The GHCB protocol so far allows for the following data
1929 * to be returned:
1930 * GPRs RAX, RBX, RCX, RDX
1931 *
Sean Christopherson25009142021-01-22 15:50:47 -08001932 * Copy their values, even if they may not have been written during the
1933 * VM-Exit. It's the guest's responsibility to not consume random data.
Tom Lendacky291bd202020-12-10 11:09:47 -06001934 */
Sean Christopherson25009142021-01-22 15:50:47 -08001935 ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
1936 ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
1937 ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
1938 ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
Tom Lendacky291bd202020-12-10 11:09:47 -06001939}
1940
1941static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
1942{
1943 struct vmcb_control_area *control = &svm->vmcb->control;
1944 struct kvm_vcpu *vcpu = &svm->vcpu;
1945 struct ghcb *ghcb = svm->ghcb;
1946 u64 exit_code;
1947
1948 /*
1949 * The GHCB protocol so far allows for the following data
1950 * to be supplied:
1951 * GPRs RAX, RBX, RCX, RDX
1952 * XCR0
1953 * CPL
1954 *
1955 * VMMCALL allows the guest to provide extra registers. KVM also
1956 * expects RSI for hypercalls, so include that, too.
1957 *
1958 * Copy their values to the appropriate location if supplied.
1959 */
1960 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
1961
1962 vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
1963 vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
1964 vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
1965 vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
1966 vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
1967
1968 svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
1969
1970 if (ghcb_xcr0_is_valid(ghcb)) {
1971 vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
1972 kvm_update_cpuid_runtime(vcpu);
1973 }
1974
1975 /* Copy the GHCB exit information into the VMCB fields */
1976 exit_code = ghcb_get_sw_exit_code(ghcb);
1977 control->exit_code = lower_32_bits(exit_code);
1978 control->exit_code_hi = upper_32_bits(exit_code);
1979 control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
1980 control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
1981
1982 /* Clear the valid entries fields */
1983 memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
1984}
1985
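/*
 * Validate that the guest has marked valid every GHCB field consumed by
 * the requested exit code; otherwise fail the VMGEXIT and report an
 * internal error exit to userspace.
 */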
1986static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
1987{
1988 struct kvm_vcpu *vcpu;
1989 struct ghcb *ghcb;
1990 u64 exit_code = 0;
1991
1992 ghcb = svm->ghcb;
1993
1994 /* Only GHCB Usage code 0 is supported */
1995 if (ghcb->ghcb_usage)
1996 goto vmgexit_err;
1997
1998 /*
	1999	 * Retrieve the exit code now even though it may not be marked valid
2000 * as it could help with debugging.
2001 */
2002 exit_code = ghcb_get_sw_exit_code(ghcb);
2003
2004 if (!ghcb_sw_exit_code_is_valid(ghcb) ||
2005 !ghcb_sw_exit_info_1_is_valid(ghcb) ||
2006 !ghcb_sw_exit_info_2_is_valid(ghcb))
2007 goto vmgexit_err;
2008
2009 switch (ghcb_get_sw_exit_code(ghcb)) {
2010 case SVM_EXIT_READ_DR7:
2011 break;
2012 case SVM_EXIT_WRITE_DR7:
2013 if (!ghcb_rax_is_valid(ghcb))
2014 goto vmgexit_err;
2015 break;
2016 case SVM_EXIT_RDTSC:
2017 break;
2018 case SVM_EXIT_RDPMC:
2019 if (!ghcb_rcx_is_valid(ghcb))
2020 goto vmgexit_err;
2021 break;
2022 case SVM_EXIT_CPUID:
2023 if (!ghcb_rax_is_valid(ghcb) ||
2024 !ghcb_rcx_is_valid(ghcb))
2025 goto vmgexit_err;
2026 if (ghcb_get_rax(ghcb) == 0xd)
2027 if (!ghcb_xcr0_is_valid(ghcb))
2028 goto vmgexit_err;
2029 break;
2030 case SVM_EXIT_INVD:
2031 break;
2032 case SVM_EXIT_IOIO:
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002033 if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
2034 if (!ghcb_sw_scratch_is_valid(ghcb))
Tom Lendacky291bd202020-12-10 11:09:47 -06002035 goto vmgexit_err;
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002036 } else {
2037 if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
2038 if (!ghcb_rax_is_valid(ghcb))
2039 goto vmgexit_err;
2040 }
Tom Lendacky291bd202020-12-10 11:09:47 -06002041 break;
2042 case SVM_EXIT_MSR:
2043 if (!ghcb_rcx_is_valid(ghcb))
2044 goto vmgexit_err;
2045 if (ghcb_get_sw_exit_info_1(ghcb)) {
2046 if (!ghcb_rax_is_valid(ghcb) ||
2047 !ghcb_rdx_is_valid(ghcb))
2048 goto vmgexit_err;
2049 }
2050 break;
2051 case SVM_EXIT_VMMCALL:
2052 if (!ghcb_rax_is_valid(ghcb) ||
2053 !ghcb_cpl_is_valid(ghcb))
2054 goto vmgexit_err;
2055 break;
2056 case SVM_EXIT_RDTSCP:
2057 break;
2058 case SVM_EXIT_WBINVD:
2059 break;
2060 case SVM_EXIT_MONITOR:
2061 if (!ghcb_rax_is_valid(ghcb) ||
2062 !ghcb_rcx_is_valid(ghcb) ||
2063 !ghcb_rdx_is_valid(ghcb))
2064 goto vmgexit_err;
2065 break;
2066 case SVM_EXIT_MWAIT:
2067 if (!ghcb_rax_is_valid(ghcb) ||
2068 !ghcb_rcx_is_valid(ghcb))
2069 goto vmgexit_err;
2070 break;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002071 case SVM_VMGEXIT_MMIO_READ:
2072 case SVM_VMGEXIT_MMIO_WRITE:
2073 if (!ghcb_sw_scratch_is_valid(ghcb))
2074 goto vmgexit_err;
2075 break;
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002076 case SVM_VMGEXIT_NMI_COMPLETE:
Tom Lendacky647daca2021-01-04 14:20:01 -06002077 case SVM_VMGEXIT_AP_HLT_LOOP:
Tom Lendacky8640ca52020-12-15 12:44:07 -05002078 case SVM_VMGEXIT_AP_JUMP_TABLE:
Tom Lendacky291bd202020-12-10 11:09:47 -06002079 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
2080 break;
2081 default:
2082 goto vmgexit_err;
2083 }
2084
2085 return 0;
2086
2087vmgexit_err:
2088 vcpu = &svm->vcpu;
2089
2090 if (ghcb->ghcb_usage) {
2091 vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
2092 ghcb->ghcb_usage);
2093 } else {
2094 vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
2095 exit_code);
2096 dump_ghcb(svm);
2097 }
2098
2099 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2100 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
2101 vcpu->run->internal.ndata = 2;
2102 vcpu->run->internal.data[0] = exit_code;
2103 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
2104
2105 return -EINVAL;
2106}
2107
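/*
 * Pre-run bookkeeping for SEV-ES: write back and free any detached
 * scratch buffer, sync register state back to the GHCB and unmap it so
 * the guest can observe the results of its last VMGEXIT.
 */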
2108static void pre_sev_es_run(struct vcpu_svm *svm)
2109{
2110 if (!svm->ghcb)
2111 return;
2112
Tom Lendacky8f423a82020-12-10 11:09:53 -06002113 if (svm->ghcb_sa_free) {
2114 /*
2115 * The scratch area lives outside the GHCB, so there is a
2116 * buffer that, depending on the operation performed, may
2117 * need to be synced, then freed.
2118 */
2119 if (svm->ghcb_sa_sync) {
2120 kvm_write_guest(svm->vcpu.kvm,
2121 ghcb_get_sw_scratch(svm->ghcb),
2122 svm->ghcb_sa, svm->ghcb_sa_len);
2123 svm->ghcb_sa_sync = false;
2124 }
2125
2126 kfree(svm->ghcb_sa);
2127 svm->ghcb_sa = NULL;
2128 svm->ghcb_sa_free = false;
2129 }
2130
Tom Lendackyd523ab6b2020-12-10 11:09:48 -06002131 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);
2132
Tom Lendacky291bd202020-12-10 11:09:47 -06002133 sev_es_sync_to_ghcb(svm);
2134
2135 kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
2136 svm->ghcb = NULL;
2137}
2138
Joerg Roedeleaf78262020-03-24 10:41:54 +01002139void pre_sev_run(struct vcpu_svm *svm, int cpu)
2140{
2141 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2142 int asid = sev_get_asid(svm->vcpu.kvm);
2143
Tom Lendacky291bd202020-12-10 11:09:47 -06002144 /* Perform any SEV-ES pre-run actions */
2145 pre_sev_es_run(svm);
2146
Joerg Roedeleaf78262020-03-24 10:41:54 +01002147	/* Assign the ASID allocated to this SEV guest */
Paolo Bonzinidee734a2020-11-30 09:39:59 -05002148 svm->asid = asid;
Joerg Roedeleaf78262020-03-24 10:41:54 +01002149
2150 /*
2151 * Flush guest TLB:
2152 *
	2153	 * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
	2154	 * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
2155 */
2156 if (sd->sev_vmcbs[asid] == svm->vmcb &&
Jim Mattson8a14fe42020-06-03 16:56:22 -07002157 svm->vcpu.arch.last_vmentry_cpu == cpu)
Joerg Roedeleaf78262020-03-24 10:41:54 +01002158 return;
2159
Joerg Roedeleaf78262020-03-24 10:41:54 +01002160 sd->sev_vmcbs[asid] = svm->vmcb;
2161 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
Joerg Roedel06e78522020-06-25 10:03:23 +02002162 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
Joerg Roedeleaf78262020-03-24 10:41:54 +01002163}
Tom Lendacky291bd202020-12-10 11:09:47 -06002164
Tom Lendacky8f423a82020-12-10 11:09:53 -06002165#define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
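/*
 * Map the scratch area the guest supplied for a VMGEXIT. It may live
 * inside the GHCB shared buffer, in which case it is used in place, or
 * elsewhere in guest memory, in which case it is copied into a bounded
 * kernel buffer that is optionally synced back (@sync) and freed before
 * the next VMRUN.
 */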
2166static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
2167{
2168 struct vmcb_control_area *control = &svm->vmcb->control;
2169 struct ghcb *ghcb = svm->ghcb;
2170 u64 ghcb_scratch_beg, ghcb_scratch_end;
2171 u64 scratch_gpa_beg, scratch_gpa_end;
2172 void *scratch_va;
2173
2174 scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
2175 if (!scratch_gpa_beg) {
2176 pr_err("vmgexit: scratch gpa not provided\n");
2177 return false;
2178 }
2179
2180 scratch_gpa_end = scratch_gpa_beg + len;
2181 if (scratch_gpa_end < scratch_gpa_beg) {
2182 pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
2183 len, scratch_gpa_beg);
2184 return false;
2185 }
2186
2187 if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
2188 /* Scratch area begins within GHCB */
2189 ghcb_scratch_beg = control->ghcb_gpa +
2190 offsetof(struct ghcb, shared_buffer);
2191 ghcb_scratch_end = control->ghcb_gpa +
2192 offsetof(struct ghcb, reserved_1);
2193
2194 /*
2195 * If the scratch area begins within the GHCB, it must be
2196 * completely contained in the GHCB shared buffer area.
2197 */
2198 if (scratch_gpa_beg < ghcb_scratch_beg ||
2199 scratch_gpa_end > ghcb_scratch_end) {
2200 pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
2201 scratch_gpa_beg, scratch_gpa_end);
2202 return false;
2203 }
2204
2205 scratch_va = (void *)svm->ghcb;
2206 scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
2207 } else {
2208 /*
2209 * The guest memory must be read into a kernel buffer, so
	2210	 * limit the size.
2211 */
2212 if (len > GHCB_SCRATCH_AREA_LIMIT) {
2213 pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
2214 len, GHCB_SCRATCH_AREA_LIMIT);
2215 return false;
2216 }
Sean Christophersoneba04b22021-03-30 19:30:25 -07002217 scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
Tom Lendacky8f423a82020-12-10 11:09:53 -06002218 if (!scratch_va)
2219 return false;
2220
2221 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
2222 /* Unable to copy scratch area from guest */
2223 pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
2224
2225 kfree(scratch_va);
2226 return false;
2227 }
2228
2229 /*
2230 * The scratch area is outside the GHCB. The operation will
2231 * dictate whether the buffer needs to be synced before running
2232 * the vCPU next time (i.e. a read was requested so the data
2233 * must be written back to the guest memory).
2234 */
2235 svm->ghcb_sa_sync = sync;
2236 svm->ghcb_sa_free = true;
2237 }
2238
2239 svm->ghcb_sa = scratch_va;
2240 svm->ghcb_sa_len = len;
2241
2242 return true;
2243}
2244
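/*
 * Helpers for the GHCB MSR protocol: requests and responses are packed
 * into bit-fields of the GHCB GPA value in the VMCB, letting the guest
 * and KVM exchange small messages without a mapped GHCB page.
 */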
Tom Lendackyd3694662020-12-10 11:09:50 -06002245static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
2246 unsigned int pos)
2247{
2248 svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
2249 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
2250}
2251
2252static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
2253{
2254 return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
2255}
2256
Tom Lendacky1edc1452020-12-10 11:09:49 -06002257static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
2258{
2259 svm->vmcb->control.ghcb_gpa = value;
2260}
2261
Tom Lendacky291bd202020-12-10 11:09:47 -06002262static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
2263{
Tom Lendacky1edc1452020-12-10 11:09:49 -06002264 struct vmcb_control_area *control = &svm->vmcb->control;
Tom Lendackyd3694662020-12-10 11:09:50 -06002265 struct kvm_vcpu *vcpu = &svm->vcpu;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002266 u64 ghcb_info;
Tom Lendackyd3694662020-12-10 11:09:50 -06002267 int ret = 1;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002268
2269 ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;
2270
Tom Lendacky59e38b52020-12-10 11:09:52 -06002271 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
2272 control->ghcb_gpa);
2273
Tom Lendacky1edc1452020-12-10 11:09:49 -06002274 switch (ghcb_info) {
2275 case GHCB_MSR_SEV_INFO_REQ:
2276 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2277 GHCB_VERSION_MIN,
2278 sev_enc_bit));
2279 break;
Tom Lendackyd3694662020-12-10 11:09:50 -06002280 case GHCB_MSR_CPUID_REQ: {
2281 u64 cpuid_fn, cpuid_reg, cpuid_value;
2282
2283 cpuid_fn = get_ghcb_msr_bits(svm,
2284 GHCB_MSR_CPUID_FUNC_MASK,
2285 GHCB_MSR_CPUID_FUNC_POS);
2286
2287 /* Initialize the registers needed by the CPUID intercept */
2288 vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
2289 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2290
Paolo Bonzini63129752021-03-02 14:40:39 -05002291 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
Tom Lendackyd3694662020-12-10 11:09:50 -06002292 if (!ret) {
2293 ret = -EINVAL;
2294 break;
2295 }
2296
2297 cpuid_reg = get_ghcb_msr_bits(svm,
2298 GHCB_MSR_CPUID_REG_MASK,
2299 GHCB_MSR_CPUID_REG_POS);
2300 if (cpuid_reg == 0)
2301 cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
2302 else if (cpuid_reg == 1)
2303 cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
2304 else if (cpuid_reg == 2)
2305 cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
2306 else
2307 cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];
2308
2309 set_ghcb_msr_bits(svm, cpuid_value,
2310 GHCB_MSR_CPUID_VALUE_MASK,
2311 GHCB_MSR_CPUID_VALUE_POS);
2312
2313 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
2314 GHCB_MSR_INFO_MASK,
2315 GHCB_MSR_INFO_POS);
2316 break;
2317 }
Tom Lendackye1d71112020-12-10 11:09:51 -06002318 case GHCB_MSR_TERM_REQ: {
2319 u64 reason_set, reason_code;
2320
2321 reason_set = get_ghcb_msr_bits(svm,
2322 GHCB_MSR_TERM_REASON_SET_MASK,
2323 GHCB_MSR_TERM_REASON_SET_POS);
2324 reason_code = get_ghcb_msr_bits(svm,
2325 GHCB_MSR_TERM_REASON_MASK,
2326 GHCB_MSR_TERM_REASON_POS);
2327 pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
2328 reason_set, reason_code);
2329 fallthrough;
2330 }
Tom Lendacky1edc1452020-12-10 11:09:49 -06002331 default:
Tom Lendackyd3694662020-12-10 11:09:50 -06002332 ret = -EINVAL;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002333 }
2334
Tom Lendacky59e38b52020-12-10 11:09:52 -06002335 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
2336 control->ghcb_gpa, ret);
2337
Tom Lendackyd3694662020-12-10 11:09:50 -06002338 return ret;
Tom Lendacky291bd202020-12-10 11:09:47 -06002339}
2340
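/*
 * Main VMGEXIT handler: service the GHCB MSR protocol if the GHCB GPA
 * carries info bits, otherwise map and validate the guest's GHCB, sync
 * its contents into the VMCB/vCPU and dispatch the requested exit code.
 */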
Paolo Bonzini63129752021-03-02 14:40:39 -05002341int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
Tom Lendacky291bd202020-12-10 11:09:47 -06002342{
Paolo Bonzini63129752021-03-02 14:40:39 -05002343 struct vcpu_svm *svm = to_svm(vcpu);
Tom Lendacky291bd202020-12-10 11:09:47 -06002344 struct vmcb_control_area *control = &svm->vmcb->control;
2345 u64 ghcb_gpa, exit_code;
2346 struct ghcb *ghcb;
2347 int ret;
2348
2349 /* Validate the GHCB */
2350 ghcb_gpa = control->ghcb_gpa;
2351 if (ghcb_gpa & GHCB_MSR_INFO_MASK)
2352 return sev_handle_vmgexit_msr_protocol(svm);
2353
2354 if (!ghcb_gpa) {
Paolo Bonzini63129752021-03-02 14:40:39 -05002355 vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
Tom Lendacky291bd202020-12-10 11:09:47 -06002356 return -EINVAL;
2357 }
2358
Paolo Bonzini63129752021-03-02 14:40:39 -05002359 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
Tom Lendacky291bd202020-12-10 11:09:47 -06002360 /* Unable to map GHCB from guest */
Paolo Bonzini63129752021-03-02 14:40:39 -05002361 vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
Tom Lendacky291bd202020-12-10 11:09:47 -06002362 ghcb_gpa);
2363 return -EINVAL;
2364 }
2365
2366 svm->ghcb = svm->ghcb_map.hva;
2367 ghcb = svm->ghcb_map.hva;
2368
Paolo Bonzini63129752021-03-02 14:40:39 -05002369 trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
Tom Lendackyd523ab6b2020-12-10 11:09:48 -06002370
Tom Lendacky291bd202020-12-10 11:09:47 -06002371 exit_code = ghcb_get_sw_exit_code(ghcb);
2372
2373 ret = sev_es_validate_vmgexit(svm);
2374 if (ret)
2375 return ret;
2376
2377 sev_es_sync_from_ghcb(svm);
2378 ghcb_set_sw_exit_info_1(ghcb, 0);
2379 ghcb_set_sw_exit_info_2(ghcb, 0);
2380
2381 ret = -EINVAL;
2382 switch (exit_code) {
Tom Lendacky8f423a82020-12-10 11:09:53 -06002383 case SVM_VMGEXIT_MMIO_READ:
2384 if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
2385 break;
2386
Paolo Bonzini63129752021-03-02 14:40:39 -05002387 ret = kvm_sev_es_mmio_read(vcpu,
Tom Lendacky8f423a82020-12-10 11:09:53 -06002388 control->exit_info_1,
2389 control->exit_info_2,
2390 svm->ghcb_sa);
2391 break;
2392 case SVM_VMGEXIT_MMIO_WRITE:
2393 if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
2394 break;
2395
Paolo Bonzini63129752021-03-02 14:40:39 -05002396 ret = kvm_sev_es_mmio_write(vcpu,
Tom Lendacky8f423a82020-12-10 11:09:53 -06002397 control->exit_info_1,
2398 control->exit_info_2,
2399 svm->ghcb_sa);
2400 break;
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002401 case SVM_VMGEXIT_NMI_COMPLETE:
Paolo Bonzini63129752021-03-02 14:40:39 -05002402 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002403 break;
Tom Lendacky647daca2021-01-04 14:20:01 -06002404 case SVM_VMGEXIT_AP_HLT_LOOP:
Paolo Bonzini63129752021-03-02 14:40:39 -05002405 ret = kvm_emulate_ap_reset_hold(vcpu);
Tom Lendacky647daca2021-01-04 14:20:01 -06002406 break;
Tom Lendacky8640ca52020-12-15 12:44:07 -05002407 case SVM_VMGEXIT_AP_JUMP_TABLE: {
Paolo Bonzini63129752021-03-02 14:40:39 -05002408 struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
Tom Lendacky8640ca52020-12-15 12:44:07 -05002409
2410 switch (control->exit_info_1) {
2411 case 0:
2412 /* Set AP jump table address */
2413 sev->ap_jump_table = control->exit_info_2;
2414 break;
2415 case 1:
2416 /* Get AP jump table address */
2417 ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
2418 break;
2419 default:
2420 pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
2421 control->exit_info_1);
2422 ghcb_set_sw_exit_info_1(ghcb, 1);
2423 ghcb_set_sw_exit_info_2(ghcb,
2424 X86_TRAP_UD |
2425 SVM_EVTINJ_TYPE_EXEPT |
2426 SVM_EVTINJ_VALID);
2427 }
2428
2429 ret = 1;
2430 break;
2431 }
Tom Lendacky291bd202020-12-10 11:09:47 -06002432 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
Paolo Bonzini63129752021-03-02 14:40:39 -05002433 vcpu_unimpl(vcpu,
Tom Lendacky291bd202020-12-10 11:09:47 -06002434 "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
2435 control->exit_info_1, control->exit_info_2);
2436 break;
2437 default:
Paolo Bonzini63129752021-03-02 14:40:39 -05002438 ret = svm_invoke_exit_handler(vcpu, exit_code);
Tom Lendacky291bd202020-12-10 11:09:47 -06002439 }
2440
2441 return ret;
2442}
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002443
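/* Perform string I/O for an SEV-ES guest by way of the GHCB scratch buffer. */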
2444int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
2445{
2446 if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
2447 return -EINVAL;
2448
2449 return kvm_sev_es_string_io(&svm->vcpu, size, port,
2450 svm->ghcb_sa, svm->ghcb_sa_len, in);
2451}
Tom Lendacky376c6d22020-12-10 11:10:06 -06002452
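/*
 * Tweak the VMCB for an SEV-ES guest: hardware state the hypervisor can
 * no longer read or modify directly (CR0/CR4/CR8, XCR0, various MSRs)
 * is either left un-intercepted or tracked with post-write trap-style
 * intercepts (TRAP_EFER_WRITE, TRAP_CRx_WRITE).
 */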
2453void sev_es_init_vmcb(struct vcpu_svm *svm)
2454{
2455 struct kvm_vcpu *vcpu = &svm->vcpu;
2456
2457 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
2458 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
2459
2460 /*
	2461	 * An SEV-ES guest requires a VMSA area that is separate from the
2462 * VMCB page. Do not include the encryption mask on the VMSA physical
2463 * address since hardware will access it using the guest key.
2464 */
2465 svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);
2466
2467 /* Can't intercept CR register access, HV can't modify CR registers */
2468 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
2469 svm_clr_intercept(svm, INTERCEPT_CR4_READ);
2470 svm_clr_intercept(svm, INTERCEPT_CR8_READ);
2471 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
2472 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
2473 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
2474
2475 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);
2476
2477 /* Track EFER/CR register changes */
2478 svm_set_intercept(svm, TRAP_EFER_WRITE);
2479 svm_set_intercept(svm, TRAP_CR0_WRITE);
2480 svm_set_intercept(svm, TRAP_CR4_WRITE);
2481 svm_set_intercept(svm, TRAP_CR8_WRITE);
2482
2483 /* No support for enable_vmware_backdoor */
2484 clr_exception_intercept(svm, GP_VECTOR);
2485
2486 /* Can't intercept XSETBV, HV can't modify XCR0 directly */
2487 svm_clr_intercept(svm, INTERCEPT_XSETBV);
2488
2489 /* Clear intercepts on selected MSRs */
2490 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
2491 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
2492 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
2493 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
2494 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
2495 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
2496}
2497
2498void sev_es_create_vcpu(struct vcpu_svm *svm)
2499{
2500 /*
2501 * Set the GHCB MSR value as per the GHCB specification when creating
2502 * a vCPU for an SEV-ES guest.
2503 */
2504 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2505 GHCB_VERSION_MIN,
2506 sev_enc_bit));
2507}
Tom Lendacky86137772020-12-10 11:10:07 -06002508
Michael Rotha7fc06d2021-02-02 13:01:26 -06002509void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
Tom Lendacky86137772020-12-10 11:10:07 -06002510{
2511 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2512 struct vmcb_save_area *hostsa;
Tom Lendacky86137772020-12-10 11:10:07 -06002513
2514 /*
	2515	 * For an SEV-ES guest, hardware restores the host state on VMEXIT,
	2516	 * one step of which is to perform a VMLOAD. Since hardware does not
2517 * perform a VMSAVE on VMRUN, the host savearea must be updated.
2518 */
Sean Christopherson35a78312020-12-30 16:27:00 -08002519 vmsave(__sme_page_pa(sd->save_area));
Tom Lendacky86137772020-12-10 11:10:07 -06002520
Tom Lendacky86137772020-12-10 11:10:07 -06002521 /* XCR0 is restored on VMEXIT, save the current host value */
2522 hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
2523 hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
2524
	2525	/* PKRU is restored on VMEXIT, save the current host value */
2526 hostsa->pkru = read_pkru();
2527
	2528	/* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
2529 hostsa->xss = host_xss;
2530}
2531
Tom Lendacky647daca2021-01-04 14:20:01 -06002532void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
2533{
2534 struct vcpu_svm *svm = to_svm(vcpu);
2535
2536 /* First SIPI: Use the values as initially set by the VMM */
2537 if (!svm->received_first_sipi) {
2538 svm->received_first_sipi = true;
2539 return;
2540 }
2541
2542 /*
2543 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
2544 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
2545 * non-zero value.
2546 */
Tom Lendackya3ba26e2021-04-09 09:38:42 -05002547 if (!svm->ghcb)
2548 return;
2549
Tom Lendacky647daca2021-01-04 14:20:01 -06002550 ghcb_set_sw_exit_info_2(svm->ghcb, 1);
2551}