// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/processor.h>
#include <linux/trace_events.h>
#include <asm/fpu/internal.h>

#include <asm/trapnr.h>

#include "x86.h"
#include "svm.h"
#include "cpuid.h"
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

static u8 sev_enc_bit;
static int sev_flush_asids(void);
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;

struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

static int sev_flush_asids(void)
{
	int ret, error = 0;

	/*
	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
	 * so it must be guarded.
	 */
	down_write(&sev_deactivate_lock);

	wbinvd_on_all_cpus();
	ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return ret;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(int min_asid, int max_asid)
{
	int pos;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	pos = find_next_bit(sev_reclaim_asid_bitmap, max_sev_asid, min_asid);
	if (pos >= max_asid)
		return false;

	if (sev_flush_asids())
		return false;

	/* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
		   max_sev_asid);
	bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);

	return true;
}

static int sev_asid_new(struct kvm_sev_info *sev)
{
	int pos, min_asid, max_asid;
	bool retry = true;

	mutex_lock(&sev_bitmap_lock);

	/*
	 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
	 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
	 */
	min_asid = sev->es_active ? 0 : min_sev_asid - 1;
	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
again:
	pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
	if (pos >= max_asid) {
		if (retry && __sev_recycle_asids(min_asid, max_asid)) {
			retry = false;
			goto again;
		}
		mutex_unlock(&sev_bitmap_lock);
		return -EBUSY;
	}

	__set_bit(pos, sev_asid_bitmap);

	mutex_unlock(&sev_bitmap_lock);

	return pos + 1;
}

static int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->asid;
}

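/*
 * Return an ASID to the reclaim pool and drop any per-CPU cached VMCB
 * pointers that still reference it; the ASID only becomes reusable once
 * __sev_recycle_asids() has performed a DF_FLUSH.
 */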
static void sev_asid_free(int asid)
{
	struct svm_cpu_data *sd;
	int cpu, pos;

	mutex_lock(&sev_bitmap_lock);

	pos = asid - 1;
	__set_bit(pos, sev_reclaim_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu(svm_data, cpu);
		sd->sev_vmcbs[pos] = NULL;
	}

	mutex_unlock(&sev_bitmap_lock);
}

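/*
 * Tear down the firmware context for a guest handle: DEACTIVATE detaches the
 * ASID from the handle and DECOMMISSION releases the handle in the PSP.
 */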
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_decommission *decommission;
	struct sev_data_deactivate *data;

	if (!handle)
		return;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	/* deactivate handle */
	data->handle = handle;

	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(data, NULL);
	up_read(&sev_deactivate_lock);

	kfree(data);

	decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
	if (!decommission)
		return;

	/* decommission handle */
	decommission->handle = handle;
	sev_guest_decommission(decommission, NULL);

	kfree(decommission);
}

static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	int asid, ret;

	ret = -EBUSY;
	if (unlikely(sev->active))
		return ret;

	asid = sev_asid_new(sev);
	if (asid < 0)
		return ret;

	ret = sev_platform_init(&argp->error);
	if (ret)
		goto e_free;

	sev->active = true;
	sev->asid = asid;
	INIT_LIST_HEAD(&sev->regions_list);

	return 0;

e_free:
	sev_asid_free(asid);
	return ret;
}

static int sev_es_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	if (!sev_es)
		return -ENOTTY;

	to_kvm_svm(kvm)->sev_info.es_active = true;

	return sev_guest_init(kvm, argp);
}

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
	struct sev_data_activate *data;
	int asid = sev_get_asid(kvm);
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	/* activate ASID on the given handle */
	data->handle = handle;
	data->asid = asid;
	ret = sev_guest_activate(data, error);
	kfree(data);

	return ret;
}

static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = sev_issue_cmd_external_user(f.file, id, data, error);

	fdput(f);
	return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return __sev_issue_cmd(sev->fd, id, data, error);
}

static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_start *start;
	struct kvm_sev_launch_start params;
	void *dh_blob, *session_blob;
	int *error = &argp->error;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
	if (!start)
		return -ENOMEM;

	dh_blob = NULL;
	if (params.dh_uaddr) {
		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
		if (IS_ERR(dh_blob)) {
			ret = PTR_ERR(dh_blob);
			goto e_free;
		}

		start->dh_cert_address = __sme_set(__pa(dh_blob));
		start->dh_cert_len = params.dh_len;
	}

	session_blob = NULL;
	if (params.session_uaddr) {
		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
		if (IS_ERR(session_blob)) {
			ret = PTR_ERR(session_blob);
			goto e_free_dh;
		}

		start->session_address = __sme_set(__pa(session_blob));
		start->session_len = params.session_len;
	}

	start->handle = params.handle;
	start->policy = params.policy;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start->handle, error);
	if (ret)
		goto e_free_session;

	/* return handle to userspace */
	params.handle = start->handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
		sev_unbind_asid(kvm, start->handle);
		ret = -EFAULT;
		goto e_free_session;
	}

	sev->handle = start->handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_blob);
e_free_dh:
	kfree(dh_blob);
e_free:
	kfree(start);
	return ret;
}

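/*
 * Pin the guest memory described by (uaddr, ulen) with pin_user_pages_fast()
 * and account the pages against the VM's RLIMIT_MEMLOCK budget.  Returns the
 * page array (kmalloc- or vmalloc-backed) and the page count in *n.
 */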
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	unsigned long npages, size;
	int npinned;
	unsigned long locked, lock_limit;
	struct page **pages;
	unsigned long first, last;
	int ret;

	if (ulen == 0 || uaddr + ulen < uaddr)
		return ERR_PTR(-EINVAL);

	/* Calculate number of pages. */
	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return ERR_PTR(-ENOMEM);
	}

	if (WARN_ON_ONCE(npages > INT_MAX))
		return ERR_PTR(-EINVAL);

	/* Avoid using vmalloc for smaller buffers. */
	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	else
		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Pin the user virtual address. */
	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);
		ret = -ENOMEM;
		goto err;
	}

	*n = npages;
	sev->pages_locked = locked;

	return pages;

err:
	if (npinned > 0)
		unpin_user_pages(pages, npinned);

	kvfree(pages);
	return ERR_PTR(ret);
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	unpin_user_pages(pages, npages);
	kvfree(pages);
	sev->pages_locked -= npages;
}

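/*
 * Flush the given pages out of the CPU caches with CLFLUSH; needed before
 * the PSP firmware reads or encrypts them in place on CPUs that do not
 * provide hardware-enforced coherency (X86_FEATURE_SME_COHERENT).
 */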
static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
	uint8_t *page_virtual;
	unsigned long i;

	if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
	    pages == NULL)
		return;

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_atomic(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
}

static unsigned long get_num_contig_pages(unsigned long idx,
				struct page **inpages, unsigned long npages)
{
	unsigned long paddr, next_paddr;
	unsigned long i = idx + 1, pages = 1;

	/* find the number of contiguous pages starting from idx */
	paddr = __sme_page_pa(inpages[idx]);
	while (i < npages) {
		next_paddr = __sme_page_pa(inpages[i++]);
		if ((paddr + PAGE_SIZE) == next_paddr) {
			pages++;
			paddr = next_paddr;
			continue;
		}
		break;
	}

	return pages;
}

static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_launch_update_data params;
	struct sev_data_launch_update_data *data;
	struct page **inpages;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	vaddr = params.uaddr;
	size = params.len;
	vaddr_end = vaddr + size;

	/* Lock the user memory. */
	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
	if (IS_ERR(inpages)) {
		ret = PTR_ERR(inpages);
		goto e_free;
	}

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(inpages, npages);

	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
		int offset, len;

		/*
		 * If the user buffer is not page-aligned, calculate the offset
		 * within the page.
		 */
		offset = vaddr & (PAGE_SIZE - 1);

		/* Calculate the number of pages that can be encrypted in one go. */
		pages = get_num_contig_pages(i, inpages, npages);

		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

		data->handle = sev->handle;
		data->len = len;
		data->address = __sme_page_pa(inpages[i]) + offset;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
		if (ret)
			goto e_unpin;

		size -= len;
		next_vaddr = vaddr + len;
	}

e_unpin:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(inpages[i]);
		mark_page_accessed(inpages[i]);
	}
	/* unlock the user pages */
	sev_unpin_memory(kvm, inpages, npages);
e_free:
	kfree(data);
	return ret;
}

static int sev_es_sync_vmsa(struct vcpu_svm *svm)
{
	struct vmcb_save_area *save = &svm->vmcb->save;

	/* Check some debug related fields before encrypting the VMSA */
	if (svm->vcpu.guest_debug || (save->dr7 & ~DR7_FIXED_1))
		return -EINVAL;

	/* Sync registers */
524 save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
525 save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
526 save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
527 save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
528 save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
529 save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
530 save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
531 save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
Paolo Bonzinid45f89f2020-12-16 13:08:21 -0500532#ifdef CONFIG_X86_64
Tom Lendackyad731092020-12-10 11:10:09 -0600533 save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8];
534 save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9];
535 save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
536 save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
537 save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
538 save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
539 save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
540 save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
Paolo Bonzinid45f89f2020-12-16 13:08:21 -0500541#endif
Tom Lendackyad731092020-12-10 11:10:09 -0600542 save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];
543
544 /* Sync some non-GPR registers before encrypting */
545 save->xcr0 = svm->vcpu.arch.xcr0;
546 save->pkru = svm->vcpu.arch.pkru;
547 save->xss = svm->vcpu.arch.ia32_xss;
548
549 /*
550 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
551 * the traditional VMSA that is part of the VMCB. Copy the
552 * traditional VMSA as it has been built so far (in prep
553 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
554 */
555 memcpy(svm->vmsa, save, sizeof(*save));
556
557 return 0;
558}
559
560static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
561{
562 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
563 struct sev_data_launch_update_vmsa *vmsa;
564 int i, ret;
565
566 if (!sev_es_guest(kvm))
567 return -ENOTTY;
568
569 vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL);
570 if (!vmsa)
571 return -ENOMEM;
572
573 for (i = 0; i < kvm->created_vcpus; i++) {
574 struct vcpu_svm *svm = to_svm(kvm->vcpus[i]);
575
576 /* Perform some pre-encryption checks against the VMSA */
577 ret = sev_es_sync_vmsa(svm);
578 if (ret)
579 goto e_free;
580
581 /*
582 * The LAUNCH_UPDATE_VMSA command will perform in-place
583 * encryption of the VMSA memory content (i.e it will write
584 * the same memory region with the guest's key), so invalidate
585 * it first.
586 */
587 clflush_cache_range(svm->vmsa, PAGE_SIZE);
588
589 vmsa->handle = sev->handle;
590 vmsa->address = __sme_pa(svm->vmsa);
591 vmsa->len = PAGE_SIZE;
592 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, vmsa,
593 &argp->error);
594 if (ret)
595 goto e_free;
596
597 svm->vcpu.arch.guest_state_protected = true;
598 }
599
600e_free:
601 kfree(vmsa);
602 return ret;
603}
604
Joerg Roedeleaf78262020-03-24 10:41:54 +0100605static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
606{
607 void __user *measure = (void __user *)(uintptr_t)argp->data;
608 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
609 struct sev_data_launch_measure *data;
610 struct kvm_sev_launch_measure params;
611 void __user *p = NULL;
612 void *blob = NULL;
613 int ret;
614
615 if (!sev_guest(kvm))
616 return -ENOTTY;
617
618 if (copy_from_user(&params, measure, sizeof(params)))
619 return -EFAULT;
620
621 data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
622 if (!data)
623 return -ENOMEM;
624
625 /* User wants to query the blob length */
626 if (!params.len)
627 goto cmd;
628
629 p = (void __user *)(uintptr_t)params.uaddr;
630 if (p) {
631 if (params.len > SEV_FW_BLOB_MAX_SIZE) {
632 ret = -EINVAL;
633 goto e_free;
634 }
635
636 ret = -ENOMEM;
637 blob = kmalloc(params.len, GFP_KERNEL);
638 if (!blob)
639 goto e_free;
640
641 data->address = __psp_pa(blob);
642 data->len = params.len;
643 }
644
645cmd:
646 data->handle = sev->handle;
647 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
648
649 /*
650 * If we query the session length, FW responded with expected data.
651 */
652 if (!params.len)
653 goto done;
654
655 if (ret)
656 goto e_free_blob;
657
658 if (blob) {
659 if (copy_to_user(p, blob, params.len))
660 ret = -EFAULT;
661 }
662
663done:
664 params.len = data->len;
665 if (copy_to_user(measure, &params, sizeof(params)))
666 ret = -EFAULT;
667e_free_blob:
668 kfree(blob);
669e_free:
670 kfree(data);
671 return ret;
672}
673
674static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
675{
676 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
677 struct sev_data_launch_finish *data;
678 int ret;
679
680 if (!sev_guest(kvm))
681 return -ENOTTY;
682
683 data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
684 if (!data)
685 return -ENOMEM;
686
687 data->handle = sev->handle;
688 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
689
690 kfree(data);
691 return ret;
692}
693
694static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
695{
696 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
697 struct kvm_sev_guest_status params;
698 struct sev_data_guest_status *data;
699 int ret;
700
701 if (!sev_guest(kvm))
702 return -ENOTTY;
703
704 data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
705 if (!data)
706 return -ENOMEM;
707
708 data->handle = sev->handle;
709 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
710 if (ret)
711 goto e_free;
712
713 params.policy = data->policy;
714 params.state = data->state;
715 params.handle = data->handle;
716
717 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
718 ret = -EFAULT;
719e_free:
720 kfree(data);
721 return ret;
722}
723
724static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
725 unsigned long dst, int size,
726 int *error, bool enc)
727{
728 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
729 struct sev_data_dbg *data;
730 int ret;
731
732 data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
733 if (!data)
734 return -ENOMEM;
735
736 data->handle = sev->handle;
737 data->dst_addr = dst;
738 data->src_addr = src;
739 data->len = size;
740
741 ret = sev_issue_cmd(kvm,
742 enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
743 data, error);
744 kfree(data);
745 return ret;
746}
747
748static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
749 unsigned long dst_paddr, int sz, int *err)
750{
751 int offset;
752
	/*
	 * It's safe to read more than we are asked; the caller should ensure
	 * that the destination has enough space.
	 */
	offset = src_paddr & 15;
	src_paddr = round_down(src_paddr, 16);
	sz = round_up(sz + offset, 16);

	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}

static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
				  unsigned long __user dst_uaddr,
				  unsigned long dst_paddr,
				  int size, int *err)
{
	struct page *tpage = NULL;
	int ret, offset;

	/* if inputs are not 16-byte aligned then use an intermediate buffer */
	if (!IS_ALIGNED(dst_paddr, 16) ||
	    !IS_ALIGNED(paddr, 16) ||
	    !IS_ALIGNED(size, 16)) {
		tpage = (void *)alloc_page(GFP_KERNEL);
		if (!tpage)
			return -ENOMEM;

		dst_paddr = __sme_page_pa(tpage);
	}

	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
	if (ret)
		goto e_free;

	if (tpage) {
		offset = paddr & 15;
		if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
				 page_address(tpage) + offset, size))
			ret = -EFAULT;
	}

e_free:
	if (tpage)
		__free_page(tpage);

	return ret;
}

static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
				  unsigned long __user vaddr,
				  unsigned long dst_paddr,
				  unsigned long __user dst_vaddr,
				  int size, int *error)
{
	struct page *src_tpage = NULL;
	struct page *dst_tpage = NULL;
	int ret, len = size;

	/* If source buffer is not aligned then use an intermediate buffer */
	if (!IS_ALIGNED(vaddr, 16)) {
		src_tpage = alloc_page(GFP_KERNEL);
		if (!src_tpage)
			return -ENOMEM;

		if (copy_from_user(page_address(src_tpage),
				   (void __user *)(uintptr_t)vaddr, size)) {
			__free_page(src_tpage);
			return -EFAULT;
		}

		paddr = __sme_page_pa(src_tpage);
	}

	/*
	 * If destination buffer or length is not aligned then do read-modify-write:
	 * - decrypt destination in an intermediate buffer
	 * - copy the source buffer in an intermediate buffer
	 * - use the intermediate buffer as source buffer
	 */
	if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
		int dst_offset;

		dst_tpage = alloc_page(GFP_KERNEL);
		if (!dst_tpage) {
			ret = -ENOMEM;
			goto e_free;
		}

		ret = __sev_dbg_decrypt(kvm, dst_paddr,
					__sme_page_pa(dst_tpage), size, error);
		if (ret)
			goto e_free;

		/*
		 * If source is kernel buffer then use memcpy() otherwise
		 * copy_from_user().
		 */
		dst_offset = dst_paddr & 15;

		if (src_tpage)
			memcpy(page_address(dst_tpage) + dst_offset,
			       page_address(src_tpage), size);
		else {
			if (copy_from_user(page_address(dst_tpage) + dst_offset,
					   (void __user *)(uintptr_t)vaddr, size)) {
				ret = -EFAULT;
				goto e_free;
			}
		}

		paddr = __sme_page_pa(dst_tpage);
		dst_paddr = round_down(dst_paddr, 16);
		len = round_up(size, 16);
	}

	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
	if (src_tpage)
		__free_page(src_tpage);
	if (dst_tpage)
		__free_page(dst_tpage);
	return ret;
}

static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
	unsigned long vaddr, vaddr_end, next_vaddr;
	unsigned long dst_vaddr;
	struct page **src_p, **dst_p;
	struct kvm_sev_dbg debug;
	unsigned long n;
	unsigned int size;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
		return -EFAULT;

	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
		return -EINVAL;
	if (!debug.dst_uaddr)
		return -EINVAL;

	vaddr = debug.src_uaddr;
	size = debug.len;
	vaddr_end = vaddr + size;
	dst_vaddr = debug.dst_uaddr;

	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
		int len, s_off, d_off;

		/* lock userspace source and destination page */
		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
		if (IS_ERR(src_p))
			return PTR_ERR(src_p);

		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
		if (IS_ERR(dst_p)) {
			sev_unpin_memory(kvm, src_p, n);
			return PTR_ERR(dst_p);
		}

		/*
		 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
		 * the pages; flush the destination too so that future accesses do not
		 * see stale data.
		 */
		sev_clflush_pages(src_p, 1);
		sev_clflush_pages(dst_p, 1);

		/*
		 * Since the user buffer may not be page-aligned, calculate the
		 * offset within the page.
		 */
		s_off = vaddr & ~PAGE_MASK;
		d_off = dst_vaddr & ~PAGE_MASK;
		len = min_t(size_t, (PAGE_SIZE - s_off), size);

		if (dec)
			ret = __sev_dbg_decrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     dst_vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     len, &argp->error);
		else
			ret = __sev_dbg_encrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     dst_vaddr,
						     len, &argp->error);

		sev_unpin_memory(kvm, src_p, n);
		sev_unpin_memory(kvm, dst_p, n);

		if (ret)
			goto err;

		next_vaddr = vaddr + len;
		dst_vaddr = dst_vaddr + len;
		size -= len;
	}
err:
	return ret;
}

static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_secret *data;
	struct kvm_sev_launch_secret params;
	struct page **pages;
	void *blob, *hdr;
	unsigned long n, i;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(pages, n);

	/*
	 * The secret must be copied into a contiguous memory region; let's
	 * verify that the userspace memory pages are contiguous before we
	 * issue the command.
	 */
	if (get_num_contig_pages(0, pages, n) != n) {
		ret = -EINVAL;
		goto e_unpin_memory;
	}

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto e_unpin_memory;

	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	data->guest_address = __sme_page_pa(pages[0]) + offset;
	data->guest_len = params.guest_len;

	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		goto e_free;
	}

	data->trans_address = __psp_pa(blob);
	data->trans_len = params.trans_len;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr)) {
		ret = PTR_ERR(hdr);
		goto e_free_blob;
	}
	data->hdr_address = __psp_pa(hdr);
	data->hdr_len = params.hdr_len;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);

	kfree(hdr);

e_free_blob:
	kfree(blob);
e_free:
	kfree(data);
e_unpin_memory:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < n; i++) {
		set_page_dirty_lock(pages[i]);
		mark_page_accessed(pages[i]);
	}
	sev_unpin_memory(kvm, pages, n);
	return ret;
}

int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
	struct kvm_sev_cmd sev_cmd;
	int r;

	if (!svm_sev_enabled() || !sev)
		return -ENOTTY;

	if (!argp)
		return 0;

	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
		return -EFAULT;

	mutex_lock(&kvm->lock);

	switch (sev_cmd.id) {
	case KVM_SEV_INIT:
		r = sev_guest_init(kvm, &sev_cmd);
		break;
	case KVM_SEV_ES_INIT:
		r = sev_es_guest_init(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_START:
		r = sev_launch_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_DATA:
		r = sev_launch_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_VMSA:
		r = sev_launch_update_vmsa(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_MEASURE:
		r = sev_launch_measure(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_FINISH:
		r = sev_launch_finish(kvm, &sev_cmd);
		break;
	case KVM_SEV_GUEST_STATUS:
		r = sev_guest_status(kvm, &sev_cmd);
		break;
	case KVM_SEV_DBG_DECRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, true);
		break;
	case KVM_SEV_DBG_ENCRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, false);
		break;
	case KVM_SEV_LAUNCH_SECRET:
		r = sev_launch_secret(kvm, &sev_cmd);
		break;
	default:
		r = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
		r = -EFAULT;

out:
	mutex_unlock(&kvm->lock);
	return r;
}

int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct enc_region *region;
	int ret = 0;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
	if (!region)
		return -ENOMEM;

	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
	if (IS_ERR(region->pages)) {
		ret = PTR_ERR(region->pages);
		goto e_free;
	}

	/*
	 * The guest may change the memory encryption attribute from C=0 -> C=1
	 * or vice versa for this memory range. Let's make sure caches are
	 * flushed to ensure that guest data gets written into memory with
	 * the correct C-bit.
	 */
	sev_clflush_pages(region->pages, region->npages);

	region->uaddr = range->addr;
	region->size = range->size;

	mutex_lock(&kvm->lock);
	list_add_tail(&region->list, &sev->regions_list);
	mutex_unlock(&kvm->lock);

	return ret;

e_free:
	kfree(region);
	return ret;
}

static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct enc_region *i;

	list_for_each_entry(i, head, list) {
		if (i->uaddr == range->addr &&
		    i->size == range->size)
			return i;
	}

	return NULL;
}

static void __unregister_enc_region_locked(struct kvm *kvm,
					   struct enc_region *region)
{
	sev_unpin_memory(kvm, region->pages, region->npages);
	list_del(&region->list);
	kfree(region);
}

int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range)
{
	struct enc_region *region;
	int ret;

	mutex_lock(&kvm->lock);

	if (!sev_guest(kvm)) {
		ret = -ENOTTY;
		goto failed;
	}

	region = find_enc_region(kvm, range);
	if (!region) {
		ret = -EINVAL;
		goto failed;
	}

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	__unregister_enc_region_locked(kvm, region);

	mutex_unlock(&kvm->lock);
	return 0;

failed:
	mutex_unlock(&kvm->lock);
	return ret;
}

void sev_vm_destroy(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct list_head *pos, *q;

	if (!sev_guest(kvm))
		return;

	mutex_lock(&kvm->lock);

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	/*
	 * If userspace was terminated before unregistering the memory regions,
	 * then let's unpin all the registered memory.
	 */
	if (!list_empty(head)) {
		list_for_each_safe(pos, q, head) {
			__unregister_enc_region_locked(kvm,
				list_entry(pos, struct enc_region, list));
			cond_resched();
		}
	}

	mutex_unlock(&kvm->lock);

	sev_unbind_asid(kvm, sev->handle);
	sev_asid_free(sev->asid);
}

void __init sev_hardware_setup(void)
{
	unsigned int eax, ebx, ecx, edx;
	bool sev_es_supported = false;
	bool sev_supported = false;

	/* Does the CPU support SEV? */
	if (!boot_cpu_has(X86_FEATURE_SEV))
		goto out;

	/* Retrieve SEV CPUID information */
	cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);

	/* Set encryption bit location for SEV-ES guests */
	sev_enc_bit = ebx & 0x3f;

	/* Maximum number of encrypted guests supported simultaneously */
	max_sev_asid = ecx;

	if (!svm_sev_enabled())
		goto out;

	/* Minimum ASID value that should be used for SEV guest */
	min_sev_asid = edx;

	/* Initialize SEV ASID bitmaps */
	sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
	if (!sev_asid_bitmap)
		goto out;

	sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
	if (!sev_reclaim_asid_bitmap)
		goto out;

	pr_info("SEV supported: %u ASIDs\n", max_sev_asid - min_sev_asid + 1);
	sev_supported = true;

	/* SEV-ES support requested? */
	if (!sev_es)
		goto out;

	/* Does the CPU support SEV-ES? */
	if (!boot_cpu_has(X86_FEATURE_SEV_ES))
		goto out;

	/* Has the system been allocated ASIDs for SEV-ES? */
	if (min_sev_asid == 1)
		goto out;

	pr_info("SEV-ES supported: %u ASIDs\n", min_sev_asid - 1);
	sev_es_supported = true;

out:
	sev = sev_supported;
	sev_es = sev_es_supported;
}

void sev_hardware_teardown(void)
{
	if (!svm_sev_enabled())
		return;

	bitmap_free(sev_asid_bitmap);
	bitmap_free(sev_reclaim_asid_bitmap);

	sev_flush_asids();
}

/*
 * Pages used by hardware to hold guest encrypted state must be flushed before
 * returning them to the system.
 */
static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
				   unsigned long len)
{
	/*
	 * If hardware enforced cache coherency for encrypted mappings of the
	 * same physical page is supported, nothing to do.
	 */
	if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
		return;

	/*
	 * If the VM Page Flush MSR is supported, use it to flush the page
	 * (using the page virtual address and the guest ASID).
	 */
	if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
		struct kvm_sev_info *sev;
		unsigned long va_start;
		u64 start, stop;

		/* Align start and stop to page boundaries. */
		va_start = (unsigned long)va;
		start = (u64)va_start & PAGE_MASK;
		stop = PAGE_ALIGN((u64)va_start + len);

		if (start < stop) {
			sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;

			while (start < stop) {
				wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
				       start | sev->asid);

				start += PAGE_SIZE;
			}

			return;
		}

		WARN(1, "Address overflow, using WBINVD\n");
	}

	/*
	 * Hardware should always have one of the above features,
	 * but if not, use WBINVD and issue a warning.
	 */
	WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
	wbinvd_on_all_cpus();
}

void sev_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm;

	if (!sev_es_guest(vcpu->kvm))
		return;

	svm = to_svm(vcpu);

	if (vcpu->arch.guest_state_protected)
		sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
	__free_page(virt_to_page(svm->vmsa));

	if (svm->ghcb_sa_free)
		kfree(svm->ghcb_sa);
}

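/*
 * Dump the GHCB exit fields and valid bitmap to the kernel log; gated on the
 * dump_invalid_vmcb module parameter, which is re-used here.
 */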
static void dump_ghcb(struct vcpu_svm *svm)
{
	struct ghcb *ghcb = svm->ghcb;
	unsigned int nbits;

	/* Re-use the dump_invalid_vmcb module parameter */
	if (!dump_invalid_vmcb) {
		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
		return;
	}

	nbits = sizeof(ghcb->save.valid_bitmap) * 8;

	pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
	       ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
	       ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
	       ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
	       ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
}

static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct ghcb *ghcb = svm->ghcb;

	/*
	 * The GHCB protocol so far allows for the following data
	 * to be returned:
	 *   GPRs RAX, RBX, RCX, RDX
	 *
	 * Copy their values, even if they may not have been written during the
	 * VM-Exit.  It's the guest's responsibility to not consume random data.
	 */
	ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
	ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
	ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
	ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
}

static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct ghcb *ghcb = svm->ghcb;
	u64 exit_code;

	/*
	 * The GHCB protocol so far allows for the following data
	 * to be supplied:
	 *   GPRs RAX, RBX, RCX, RDX
	 *   XCR0
	 *   CPL
	 *
	 * VMMCALL allows the guest to provide extra registers. KVM also
	 * expects RSI for hypercalls, so include that, too.
	 *
	 * Copy their values to the appropriate location if supplied.
	 */
	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));

	vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
	vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
	vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
	vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
	vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);

	svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);

	if (ghcb_xcr0_is_valid(ghcb)) {
		vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
		kvm_update_cpuid_runtime(vcpu);
	}

	/* Copy the GHCB exit information into the VMCB fields */
	exit_code = ghcb_get_sw_exit_code(ghcb);
	control->exit_code = lower_32_bits(exit_code);
	control->exit_code_hi = upper_32_bits(exit_code);
	control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
	control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);

	/* Clear the valid entries fields */
	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}

static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu;
	struct ghcb *ghcb;
	u64 exit_code = 0;

	ghcb = svm->ghcb;

	/* Only GHCB Usage code 0 is supported */
	if (ghcb->ghcb_usage)
		goto vmgexit_err;

	/*
	 * Retrieve the exit code now even though it may not be marked valid
	 * as it could help with debugging.
	 */
	exit_code = ghcb_get_sw_exit_code(ghcb);

	if (!ghcb_sw_exit_code_is_valid(ghcb) ||
	    !ghcb_sw_exit_info_1_is_valid(ghcb) ||
	    !ghcb_sw_exit_info_2_is_valid(ghcb))
		goto vmgexit_err;

	switch (ghcb_get_sw_exit_code(ghcb)) {
	case SVM_EXIT_READ_DR7:
		break;
	case SVM_EXIT_WRITE_DR7:
		if (!ghcb_rax_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_EXIT_RDTSC:
		break;
	case SVM_EXIT_RDPMC:
		if (!ghcb_rcx_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_EXIT_CPUID:
		if (!ghcb_rax_is_valid(ghcb) ||
		    !ghcb_rcx_is_valid(ghcb))
			goto vmgexit_err;
		if (ghcb_get_rax(ghcb) == 0xd)
			if (!ghcb_xcr0_is_valid(ghcb))
				goto vmgexit_err;
		break;
	case SVM_EXIT_INVD:
		break;
	case SVM_EXIT_IOIO:
		if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
			if (!ghcb_sw_scratch_is_valid(ghcb))
				goto vmgexit_err;
		} else {
			if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
				if (!ghcb_rax_is_valid(ghcb))
					goto vmgexit_err;
		}
		break;
	case SVM_EXIT_MSR:
		if (!ghcb_rcx_is_valid(ghcb))
			goto vmgexit_err;
		if (ghcb_get_sw_exit_info_1(ghcb)) {
			if (!ghcb_rax_is_valid(ghcb) ||
			    !ghcb_rdx_is_valid(ghcb))
				goto vmgexit_err;
		}
		break;
	case SVM_EXIT_VMMCALL:
		if (!ghcb_rax_is_valid(ghcb) ||
		    !ghcb_cpl_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_EXIT_RDTSCP:
		break;
	case SVM_EXIT_WBINVD:
		break;
	case SVM_EXIT_MONITOR:
		if (!ghcb_rax_is_valid(ghcb) ||
		    !ghcb_rcx_is_valid(ghcb) ||
		    !ghcb_rdx_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_EXIT_MWAIT:
		if (!ghcb_rax_is_valid(ghcb) ||
		    !ghcb_rcx_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_VMGEXIT_MMIO_READ:
	case SVM_VMGEXIT_MMIO_WRITE:
		if (!ghcb_sw_scratch_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_VMGEXIT_NMI_COMPLETE:
	case SVM_VMGEXIT_AP_HLT_LOOP:
	case SVM_VMGEXIT_AP_JUMP_TABLE:
	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
		break;
	default:
		goto vmgexit_err;
	}

	return 0;

vmgexit_err:
	vcpu = &svm->vcpu;

	if (ghcb->ghcb_usage) {
		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
			    ghcb->ghcb_usage);
	} else {
		vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
			    exit_code);
		dump_ghcb(svm);
	}

	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
	vcpu->run->internal.ndata = 2;
	vcpu->run->internal.data[0] = exit_code;
	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;

	return -EINVAL;
}

static void pre_sev_es_run(struct vcpu_svm *svm)
{
	if (!svm->ghcb)
		return;

	if (svm->ghcb_sa_free) {
		/*
		 * The scratch area lives outside the GHCB, so there is a
		 * buffer that, depending on the operation performed, may
		 * need to be synced, then freed.
		 */
		if (svm->ghcb_sa_sync) {
			kvm_write_guest(svm->vcpu.kvm,
					ghcb_get_sw_scratch(svm->ghcb),
					svm->ghcb_sa, svm->ghcb_sa_len);
			svm->ghcb_sa_sync = false;
		}

		kfree(svm->ghcb_sa);
		svm->ghcb_sa = NULL;
		svm->ghcb_sa_free = false;
	}

	trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);

	sev_es_sync_to_ghcb(svm);

	kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
	svm->ghcb = NULL;
}

void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	int asid = sev_get_asid(svm->vcpu.kvm);

	/* Perform any SEV-ES pre-run actions */
	pre_sev_es_run(svm);

	/* Assign the asid allocated with this SEV guest */
	svm->asid = asid;

	/*
	 * Flush guest TLB:
	 *
	 * 1) when different VMCB for the same ASID is to be run on the same host CPU.
	 * 2) or this VMCB was executed on different host CPU in previous VMRUNs.
	 */
	if (sd->sev_vmcbs[asid] == svm->vmcb &&
	    svm->vcpu.arch.last_vmentry_cpu == cpu)
		return;

	sd->sev_vmcbs[asid] = svm->vmcb;
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
}

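/*
 * Resolve the guest-supplied SW_SCRATCH area for a VMGEXIT: either point at
 * the GHCB's shared buffer, or copy the area into a bounce buffer (capped at
 * GHCB_SCRATCH_AREA_LIMIT) that is optionally written back to guest memory
 * before the next VMRUN.
 */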
#define GHCB_SCRATCH_AREA_LIMIT		(16ULL * PAGE_SIZE)
static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct ghcb *ghcb = svm->ghcb;
	u64 ghcb_scratch_beg, ghcb_scratch_end;
	u64 scratch_gpa_beg, scratch_gpa_end;
	void *scratch_va;

	scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
	if (!scratch_gpa_beg) {
		pr_err("vmgexit: scratch gpa not provided\n");
		return false;
	}

	scratch_gpa_end = scratch_gpa_beg + len;
	if (scratch_gpa_end < scratch_gpa_beg) {
		pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
		       len, scratch_gpa_beg);
		return false;
	}

	if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
		/* Scratch area begins within GHCB */
		ghcb_scratch_beg = control->ghcb_gpa +
				   offsetof(struct ghcb, shared_buffer);
		ghcb_scratch_end = control->ghcb_gpa +
				   offsetof(struct ghcb, reserved_1);

		/*
		 * If the scratch area begins within the GHCB, it must be
		 * completely contained in the GHCB shared buffer area.
		 */
		if (scratch_gpa_beg < ghcb_scratch_beg ||
		    scratch_gpa_end > ghcb_scratch_end) {
			pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
			       scratch_gpa_beg, scratch_gpa_end);
			return false;
		}

		scratch_va = (void *)svm->ghcb;
		scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
	} else {
		/*
		 * The guest memory must be read into a kernel buffer, so
		 * limit the size
		 */
		if (len > GHCB_SCRATCH_AREA_LIMIT) {
			pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
			       len, GHCB_SCRATCH_AREA_LIMIT);
			return false;
		}
		scratch_va = kzalloc(len, GFP_KERNEL);
		if (!scratch_va)
			return false;

		if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
			/* Unable to copy scratch area from guest */
			pr_err("vmgexit: kvm_read_guest for scratch area failed\n");

			kfree(scratch_va);
			return false;
		}

		/*
		 * The scratch area is outside the GHCB. The operation will
		 * dictate whether the buffer needs to be synced before running
		 * the vCPU next time (i.e. a read was requested so the data
		 * must be written back to the guest memory).
		 */
		svm->ghcb_sa_sync = sync;
		svm->ghcb_sa_free = true;
	}

	svm->ghcb_sa = scratch_va;
	svm->ghcb_sa_len = len;

	return true;
}

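/* Helpers for packing and extracting bit fields of the GHCB MSR protocol */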
static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
			      unsigned int pos)
{
	svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
	svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
}

static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
{
	return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
}

static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
{
	svm->vmcb->control.ghcb_gpa = value;
}

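/*
 * Handle a VMGEXIT that uses the GHCB MSR protocol (no mapped GHCB page):
 * SEV info requests, CPUID requests and guest termination requests are
 * serviced directly from the value in the GHCB MSR.
 */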
Tom Lendacky291bd202020-12-10 11:09:47 -06001748static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
1749{
Tom Lendacky1edc1452020-12-10 11:09:49 -06001750 struct vmcb_control_area *control = &svm->vmcb->control;
Tom Lendackyd3694662020-12-10 11:09:50 -06001751 struct kvm_vcpu *vcpu = &svm->vcpu;
Tom Lendacky1edc1452020-12-10 11:09:49 -06001752 u64 ghcb_info;
Tom Lendackyd3694662020-12-10 11:09:50 -06001753 int ret = 1;
Tom Lendacky1edc1452020-12-10 11:09:49 -06001754
1755 ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;
1756
Tom Lendacky59e38b52020-12-10 11:09:52 -06001757 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
1758 control->ghcb_gpa);
1759
Tom Lendacky1edc1452020-12-10 11:09:49 -06001760 switch (ghcb_info) {
1761 case GHCB_MSR_SEV_INFO_REQ:
1762 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
1763 GHCB_VERSION_MIN,
1764 sev_enc_bit));
1765 break;
Tom Lendackyd3694662020-12-10 11:09:50 -06001766 case GHCB_MSR_CPUID_REQ: {
1767 u64 cpuid_fn, cpuid_reg, cpuid_value;
1768
1769 cpuid_fn = get_ghcb_msr_bits(svm,
1770 GHCB_MSR_CPUID_FUNC_MASK,
1771 GHCB_MSR_CPUID_FUNC_POS);
1772
1773 /* Initialize the registers needed by the CPUID intercept */
1774 vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
1775 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
1776
1777 ret = svm_invoke_exit_handler(svm, SVM_EXIT_CPUID);
1778 if (!ret) {
1779 ret = -EINVAL;
1780 break;
1781 }
1782
1783 cpuid_reg = get_ghcb_msr_bits(svm,
1784 GHCB_MSR_CPUID_REG_MASK,
1785 GHCB_MSR_CPUID_REG_POS);
1786 if (cpuid_reg == 0)
1787 cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
1788 else if (cpuid_reg == 1)
1789 cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
1790 else if (cpuid_reg == 2)
1791 cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
1792 else
1793 cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];
1794
1795 set_ghcb_msr_bits(svm, cpuid_value,
1796 GHCB_MSR_CPUID_VALUE_MASK,
1797 GHCB_MSR_CPUID_VALUE_POS);
1798
1799 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
1800 GHCB_MSR_INFO_MASK,
1801 GHCB_MSR_INFO_POS);
1802 break;
1803 }
Tom Lendackye1d71112020-12-10 11:09:51 -06001804 case GHCB_MSR_TERM_REQ: {
1805 u64 reason_set, reason_code;
1806
1807 reason_set = get_ghcb_msr_bits(svm,
1808 GHCB_MSR_TERM_REASON_SET_MASK,
1809 GHCB_MSR_TERM_REASON_SET_POS);
1810 reason_code = get_ghcb_msr_bits(svm,
1811 GHCB_MSR_TERM_REASON_MASK,
1812 GHCB_MSR_TERM_REASON_POS);
1813 pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
1814 reason_set, reason_code);
1815 fallthrough;
1816 }
Tom Lendacky1edc1452020-12-10 11:09:49 -06001817 default:
Tom Lendackyd3694662020-12-10 11:09:50 -06001818 ret = -EINVAL;
Tom Lendacky1edc1452020-12-10 11:09:49 -06001819 }
1820
Tom Lendacky59e38b52020-12-10 11:09:52 -06001821 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
1822 control->ghcb_gpa, ret);
1823
Tom Lendackyd3694662020-12-10 11:09:50 -06001824 return ret;
Tom Lendacky291bd202020-12-10 11:09:47 -06001825}
1826
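/*
 * Main VMGEXIT handler: either service an MSR-protocol request or map
 * and validate the guest's GHCB, synchronize the GHCB register state
 * into the vCPU, and dispatch on the exit code supplied by the guest.
 */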
1827int sev_handle_vmgexit(struct vcpu_svm *svm)
1828{
1829 struct vmcb_control_area *control = &svm->vmcb->control;
1830 u64 ghcb_gpa, exit_code;
1831 struct ghcb *ghcb;
1832 int ret;
1833
1834 /* Validate the GHCB */
1835 ghcb_gpa = control->ghcb_gpa;
1836 if (ghcb_gpa & GHCB_MSR_INFO_MASK)
1837 return sev_handle_vmgexit_msr_protocol(svm);
1838
1839 if (!ghcb_gpa) {
1840 vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB gpa is not set\n");
1841 return -EINVAL;
1842 }
1843
1844 if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
1845 /* Unable to map GHCB from guest */
1846 vcpu_unimpl(&svm->vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
1847 ghcb_gpa);
1848 return -EINVAL;
1849 }
1850
1851 svm->ghcb = svm->ghcb_map.hva;
1852 ghcb = svm->ghcb_map.hva;
1853
Tom Lendackyd523ab6b2020-12-10 11:09:48 -06001854 trace_kvm_vmgexit_enter(svm->vcpu.vcpu_id, ghcb);
1855
Tom Lendacky291bd202020-12-10 11:09:47 -06001856 exit_code = ghcb_get_sw_exit_code(ghcb);
1857
1858 ret = sev_es_validate_vmgexit(svm);
1859 if (ret)
1860 return ret;
1861
1862 sev_es_sync_from_ghcb(svm);
1863 ghcb_set_sw_exit_info_1(ghcb, 0);
1864 ghcb_set_sw_exit_info_2(ghcb, 0);
1865
1866 ret = -EINVAL;
1867 switch (exit_code) {
Tom Lendacky8f423a82020-12-10 11:09:53 -06001868 case SVM_VMGEXIT_MMIO_READ:
1869 if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
1870 break;
1871
1872 ret = kvm_sev_es_mmio_read(&svm->vcpu,
1873 control->exit_info_1,
1874 control->exit_info_2,
1875 svm->ghcb_sa);
1876 break;
1877 case SVM_VMGEXIT_MMIO_WRITE:
1878 if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
1879 break;
1880
1881 ret = kvm_sev_es_mmio_write(&svm->vcpu,
1882 control->exit_info_1,
1883 control->exit_info_2,
1884 svm->ghcb_sa);
1885 break;
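	/*
	 * The guest signals that its NMI handler has completed; emulate
	 * an IRET so that NMIs are unblocked again.
	 */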
Tom Lendacky4444dfe2020-12-14 11:16:03 -05001886 case SVM_VMGEXIT_NMI_COMPLETE:
1887 ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
1888 break;
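	/*
	 * An AP has entered its HLT loop and is waiting for a SIPI;
	 * place the vCPU into an AP reset hold until one arrives.
	 */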
Tom Lendacky647daca2021-01-04 14:20:01 -06001889 case SVM_VMGEXIT_AP_HLT_LOOP:
1890 ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
1891 break;
Tom Lendacky8640ca52020-12-15 12:44:07 -05001892 case SVM_VMGEXIT_AP_JUMP_TABLE: {
1893 struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
1894
1895 switch (control->exit_info_1) {
1896 case 0:
1897 /* Set AP jump table address */
1898 sev->ap_jump_table = control->exit_info_2;
1899 break;
1900 case 1:
1901 /* Get AP jump table address */
1902 ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
1903 break;
1904 default:
1905 pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
1906 control->exit_info_1);
1907 ghcb_set_sw_exit_info_1(ghcb, 1);
1908 ghcb_set_sw_exit_info_2(ghcb,
1909 X86_TRAP_UD |
1910 SVM_EVTINJ_TYPE_EXEPT |
1911 SVM_EVTINJ_VALID);
1912 }
1913
1914 ret = 1;
1915 break;
1916 }
Tom Lendacky291bd202020-12-10 11:09:47 -06001917 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
1918 vcpu_unimpl(&svm->vcpu,
1919 "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
1920 control->exit_info_1, control->exit_info_2);
1921 break;
1922 default:
1923 ret = svm_invoke_exit_handler(svm, exit_code);
1924 }
1925
1926 return ret;
1927}
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06001928
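/*
 * String I/O data for an SEV-ES guest is bounced through the shared
 * scratch buffer described by the GHCB, since KVM cannot read the data
 * directly out of encrypted guest memory.
 */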
1929int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
1930{
1931 if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
1932 return -EINVAL;
1933
1934 return kvm_sev_es_string_io(&svm->vcpu, size, port,
1935 svm->ghcb_sa, svm->ghcb_sa_len, in);
1936}
Tom Lendacky376c6d22020-12-10 11:10:06 -06001937
1938void sev_es_init_vmcb(struct vcpu_svm *svm)
1939{
1940 struct kvm_vcpu *vcpu = &svm->vcpu;
1941
1942 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
1943 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
1944
1945 /*
1946	 * An SEV-ES guest requires a VMSA area that is separate from the
1947 * VMCB page. Do not include the encryption mask on the VMSA physical
1948 * address since hardware will access it using the guest key.
1949 */
1950 svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);
1951
1952 /* Can't intercept CR register access, HV can't modify CR registers */
1953 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
1954 svm_clr_intercept(svm, INTERCEPT_CR4_READ);
1955 svm_clr_intercept(svm, INTERCEPT_CR8_READ);
1956 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
1957 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
1958 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
1959
1960 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);
1961
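	/*
	 * EFER/CR writes cannot be intercepted and emulated for an SEV-ES
	 * guest (the register state is encrypted), so use the
	 * post-completion "trap" intercepts to observe the new values.
	 */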
1962 /* Track EFER/CR register changes */
1963 svm_set_intercept(svm, TRAP_EFER_WRITE);
1964 svm_set_intercept(svm, TRAP_CR0_WRITE);
1965 svm_set_intercept(svm, TRAP_CR4_WRITE);
1966 svm_set_intercept(svm, TRAP_CR8_WRITE);
1967
1968 /* No support for enable_vmware_backdoor */
1969 clr_exception_intercept(svm, GP_VECTOR);
1970
1971 /* Can't intercept XSETBV, HV can't modify XCR0 directly */
1972 svm_clr_intercept(svm, INTERCEPT_XSETBV);
1973
1974 /* Clear intercepts on selected MSRs */
1975 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
1976 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
1977 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
1978 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
1979 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
1980 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
1981}
1982
1983void sev_es_create_vcpu(struct vcpu_svm *svm)
1984{
1985 /*
1986 * Set the GHCB MSR value as per the GHCB specification when creating
1987 * a vCPU for an SEV-ES guest.
1988 */
1989 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
1990 GHCB_VERSION_MIN,
1991 sev_enc_bit));
1992}
Tom Lendacky86137772020-12-10 11:10:07 -06001993
1994void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
1995{
1996 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
1997 struct vmcb_save_area *hostsa;
1998 unsigned int i;
1999
2000 /*
2001	 * For an SEV-ES guest, hardware restores the host state on VMEXIT,
2002	 * one step of which is a VMLOAD. Since hardware does not perform a
2003	 * VMSAVE on VMRUN, the host save area must be updated here.
2004 */
Nathan Chancellorf65cf842020-12-18 23:37:11 -07002005 asm volatile(__ex("vmsave %0") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
Tom Lendacky86137772020-12-10 11:10:07 -06002006
2007 /*
2008	 * Certain MSRs are restored on VMEXIT; only save the ones that
2009	 * aren't.
2010 */
2011 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
2012 if (host_save_user_msrs[i].sev_es_restored)
2013 continue;
2014
2015 rdmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
2016 }
2017
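	/*
	 * The page passed to VMSAVE above is laid out as a VMCB: its save
	 * state area starts at offset 0x400, past the control area.
	 */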
2018 /* XCR0 is restored on VMEXIT, save the current host value */
2019 hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
2020 hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
2021
2022	/* PKRU is restored on VMEXIT, save the current host value */
2023 hostsa->pkru = read_pkru();
2024
2025	/* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
2026 hostsa->xss = host_xss;
2027}
2028
2029void sev_es_vcpu_put(struct vcpu_svm *svm)
2030{
2031 unsigned int i;
2032
2033 /*
2034 * Certain MSRs are restored on VMEXIT and were saved with vmsave in
2035 * sev_es_vcpu_load() above. Only restore ones that weren't.
2036 */
2037 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
2038 if (host_save_user_msrs[i].sev_es_restored)
2039 continue;
2040
2041 wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
2042 }
2043}
Tom Lendacky647daca2021-01-04 14:20:01 -06002044
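/*
 * KVM cannot set an SEV-ES AP's CS:RIP from the SIPI vector because the
 * register state is encrypted. The first SIPI is simply absorbed (the AP
 * runs with the state the VMM established in the VMSA); later SIPIs
 * complete the AP reset hold VMGEXIT, after which the guest itself
 * transfers control via its AP jump table.
 */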
2045void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
2046{
2047 struct vcpu_svm *svm = to_svm(vcpu);
2048
2049 /* First SIPI: Use the values as initially set by the VMM */
2050 if (!svm->received_first_sipi) {
2051 svm->received_first_sipi = true;
2052 return;
2053 }
2054
2055 /*
2056 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
2057 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
2058 * non-zero value.
2059 */
2060 ghcb_set_sw_exit_info_2(svm->ghcb, 1);
2061}