// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/processor.h>
#include <linux/trace_events.h>

#include "x86.h"
#include "svm.h"
#include "cpuid.h"
#include "trace.h"

static u8 sev_enc_bit;
static int sev_flush_asids(void);
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;

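/* A pinned range of encrypted guest memory, registered via KVM_MEMORY_ENCRYPT_REG_REGION. */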
struct enc_region {
        struct list_head list;
        unsigned long npages;
        struct page **pages;
        unsigned long uaddr;
        unsigned long size;
};

static int sev_flush_asids(void)
{
        int ret, error = 0;

        /*
         * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
         * so it must be guarded.
         */
        down_write(&sev_deactivate_lock);

        wbinvd_on_all_cpus();
        ret = sev_guest_df_flush(&error);

        up_write(&sev_deactivate_lock);

        if (ret)
                pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

        return ret;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(int min_asid, int max_asid)
{
        int pos;

        /* Check if there are any ASIDs to reclaim before performing a flush */
        pos = find_next_bit(sev_reclaim_asid_bitmap, max_sev_asid, min_asid);
        if (pos >= max_asid)
                return false;

        if (sev_flush_asids())
                return false;

        /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
        bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
                   max_sev_asid);
        bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);

        return true;
}

static int sev_asid_new(struct kvm_sev_info *sev)
{
        int pos, min_asid, max_asid;
        bool retry = true;

        mutex_lock(&sev_bitmap_lock);

        /*
         * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
         * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
         */
        min_asid = sev->es_active ? 0 : min_sev_asid - 1;
        max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
again:
        pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
        if (pos >= max_asid) {
                if (retry && __sev_recycle_asids(min_asid, max_asid)) {
                        retry = false;
                        goto again;
                }
                mutex_unlock(&sev_bitmap_lock);
                return -EBUSY;
        }

        __set_bit(pos, sev_asid_bitmap);

        mutex_unlock(&sev_bitmap_lock);

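        /* Bit positions are 0-based while ASIDs are 1-based. */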
        return pos + 1;
}

static int sev_get_asid(struct kvm *kvm)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->asid;
}

static void sev_asid_free(int asid)
{
        struct svm_cpu_data *sd;
        int cpu, pos;

        mutex_lock(&sev_bitmap_lock);

        pos = asid - 1;
        __set_bit(pos, sev_reclaim_asid_bitmap);

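        /*
         * Drop the cached VMCB pointer for this ASID on every CPU so that a
         * later guest reusing the ASID fails the VMCB match in pre_sev_run()
         * and gets a TLB flush before its first VMRUN.
         */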
        for_each_possible_cpu(cpu) {
                sd = per_cpu(svm_data, cpu);
                sd->sev_vmcbs[pos] = NULL;
        }

        mutex_unlock(&sev_bitmap_lock);
}

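/*
 * Release the firmware handle: DEACTIVATE detaches the guest context from
 * its ASID, then DECOMMISSION destroys the guest context in the PSP.
 */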
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
        struct sev_data_decommission *decommission;
        struct sev_data_deactivate *data;

        if (!handle)
                return;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return;

        /* deactivate handle */
        data->handle = handle;

        /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
        down_read(&sev_deactivate_lock);
        sev_guest_deactivate(data, NULL);
        up_read(&sev_deactivate_lock);

        kfree(data);

        decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
        if (!decommission)
                return;

        /* decommission handle */
        decommission->handle = handle;
        sev_guest_decommission(decommission, NULL);

        kfree(decommission);
}

static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        int asid, ret;

        ret = -EBUSY;
        if (unlikely(sev->active))
                return ret;

        asid = sev_asid_new(sev);
        if (asid < 0)
                return ret;

        ret = sev_platform_init(&argp->error);
        if (ret)
                goto e_free;

        sev->active = true;
        sev->asid = asid;
        INIT_LIST_HEAD(&sev->regions_list);

        return 0;

e_free:
        sev_asid_free(asid);
        return ret;
}

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
        struct sev_data_activate *data;
        int asid = sev_get_asid(kvm);
        int ret;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        /* activate ASID on the given handle */
        data->handle = handle;
        data->asid = asid;
        ret = sev_guest_activate(data, error);
        kfree(data);

        return ret;
}

static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
        struct fd f;
        int ret;

        f = fdget(fd);
        if (!f.file)
                return -EBADF;

        ret = sev_issue_cmd_external_user(f.file, id, data, error);

        fdput(f);
        return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return __sev_issue_cmd(sev->fd, id, data, error);
}

static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_start *start;
        struct kvm_sev_launch_start params;
        void *dh_blob, *session_blob;
        int *error = &argp->error;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
                return -EFAULT;

        start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
        if (!start)
                return -ENOMEM;

        dh_blob = NULL;
        if (params.dh_uaddr) {
                dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
                if (IS_ERR(dh_blob)) {
                        ret = PTR_ERR(dh_blob);
                        goto e_free;
                }

                start->dh_cert_address = __sme_set(__pa(dh_blob));
                start->dh_cert_len = params.dh_len;
        }

        session_blob = NULL;
        if (params.session_uaddr) {
                session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
                if (IS_ERR(session_blob)) {
                        ret = PTR_ERR(session_blob);
                        goto e_free_dh;
                }

                start->session_address = __sme_set(__pa(session_blob));
                start->session_len = params.session_len;
        }

        start->handle = params.handle;
        start->policy = params.policy;

        /* create memory encryption context */
        ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
        if (ret)
                goto e_free_session;

        /* Bind ASID to this guest */
        ret = sev_bind_asid(kvm, start->handle, error);
        if (ret)
                goto e_free_session;

        /* return handle to userspace */
        params.handle = start->handle;
        if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
                sev_unbind_asid(kvm, start->handle);
                ret = -EFAULT;
                goto e_free_session;
        }

        sev->handle = start->handle;
        sev->fd = argp->sev_fd;

e_free_session:
        kfree(session_blob);
e_free_dh:
        kfree(dh_blob);
e_free:
        kfree(start);
        return ret;
}

static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
                                    unsigned long ulen, unsigned long *n,
                                    int write)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        unsigned long npages, size;
        int npinned;
        unsigned long locked, lock_limit;
        struct page **pages;
        unsigned long first, last;
        int ret;

        if (ulen == 0 || uaddr + ulen < uaddr)
                return ERR_PTR(-EINVAL);

        /* Calculate number of pages. */
        first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
        last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
        npages = (last - first + 1);

        locked = sev->pages_locked + npages;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
                pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
                return ERR_PTR(-ENOMEM);
        }

        if (WARN_ON_ONCE(npages > INT_MAX))
                return ERR_PTR(-EINVAL);

        /* Avoid using vmalloc for smaller buffers. */
        size = npages * sizeof(struct page *);
        if (size > PAGE_SIZE)
                pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
        else
                pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

        if (!pages)
                return ERR_PTR(-ENOMEM);

        /* Pin the user virtual address. */
        npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
        if (npinned != npages) {
                pr_err("SEV: Failure locking %lu pages.\n", npages);
                ret = -ENOMEM;
                goto err;
        }

        *n = npages;
        sev->pages_locked = locked;

        return pages;

err:
        if (npinned > 0)
                unpin_user_pages(pages, npinned);

        kvfree(pages);
        return ERR_PTR(ret);
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
                             unsigned long npages)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        unpin_user_pages(pages, npages);
        kvfree(pages);
        sev->pages_locked -= npages;
}

static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
        uint8_t *page_virtual;
        unsigned long i;

        if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
            pages == NULL)
                return;

        for (i = 0; i < npages; i++) {
                page_virtual = kmap_atomic(pages[i]);
                clflush_cache_range(page_virtual, PAGE_SIZE);
                kunmap_atomic(page_virtual);
        }
}

static unsigned long get_num_contig_pages(unsigned long idx,
                                          struct page **inpages, unsigned long npages)
{
        unsigned long paddr, next_paddr;
        unsigned long i = idx + 1, pages = 1;

        /* find the number of contiguous pages starting from idx */
        paddr = __sme_page_pa(inpages[idx]);
        while (i < npages) {
                next_paddr = __sme_page_pa(inpages[i++]);
                if ((paddr + PAGE_SIZE) == next_paddr) {
                        pages++;
                        paddr = next_paddr;
                        continue;
                }
                break;
        }

        return pages;
}

static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct kvm_sev_launch_update_data params;
        struct sev_data_launch_update_data *data;
        struct page **inpages;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
                return -EFAULT;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        vaddr = params.uaddr;
        size = params.len;
        vaddr_end = vaddr + size;

        /* Lock the user memory. */
        inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
        if (IS_ERR(inpages)) {
                ret = PTR_ERR(inpages);
                goto e_free;
        }

        /*
         * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
         * place; the cache may contain the data that was written unencrypted.
         */
        sev_clflush_pages(inpages, npages);

        for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
                int offset, len;

                /*
                 * If the user buffer is not page-aligned, calculate the offset
                 * within the page.
                 */
                offset = vaddr & (PAGE_SIZE - 1);

                /* Calculate the number of pages that can be encrypted in one go. */
                pages = get_num_contig_pages(i, inpages, npages);

                len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

                data->handle = sev->handle;
                data->len = len;
                data->address = __sme_page_pa(inpages[i]) + offset;
                ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
                if (ret)
                        goto e_unpin;

                size -= len;
                next_vaddr = vaddr + len;
        }

e_unpin:
        /* content of memory is updated, mark pages dirty */
        for (i = 0; i < npages; i++) {
                set_page_dirty_lock(inpages[i]);
                mark_page_accessed(inpages[i]);
        }
        /* unlock the user pages */
        sev_unpin_memory(kvm, inpages, npages);
e_free:
        kfree(data);
        return ret;
}

static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        void __user *measure = (void __user *)(uintptr_t)argp->data;
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_measure *data;
        struct kvm_sev_launch_measure params;
        void __user *p = NULL;
        void *blob = NULL;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, measure, sizeof(params)))
                return -EFAULT;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        /* User wants to query the blob length */
        if (!params.len)
                goto cmd;

        p = (void __user *)(uintptr_t)params.uaddr;
        if (p) {
                if (params.len > SEV_FW_BLOB_MAX_SIZE) {
                        ret = -EINVAL;
                        goto e_free;
                }

                ret = -ENOMEM;
                blob = kmalloc(params.len, GFP_KERNEL);
                if (!blob)
                        goto e_free;

                data->address = __psp_pa(blob);
                data->len = params.len;
        }

cmd:
        data->handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);

        /*
         * If userspace was only querying the measurement blob length, the
         * firmware has already responded with the required length, so skip
         * the error check and copy the length back.
         */
        if (!params.len)
                goto done;

        if (ret)
                goto e_free_blob;

        if (blob) {
                if (copy_to_user(p, blob, params.len))
                        ret = -EFAULT;
        }

done:
        params.len = data->len;
        if (copy_to_user(measure, &params, sizeof(params)))
                ret = -EFAULT;
e_free_blob:
        kfree(blob);
e_free:
        kfree(data);
        return ret;
}

static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_finish *data;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        data->handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);

        kfree(data);
        return ret;
}

static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct kvm_sev_guest_status params;
        struct sev_data_guest_status *data;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        data->handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
        if (ret)
                goto e_free;

        params.policy = data->policy;
        params.state = data->state;
        params.handle = data->handle;

        if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
                ret = -EFAULT;
e_free:
        kfree(data);
        return ret;
}

static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
                               unsigned long dst, int size,
                               int *error, bool enc)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_dbg *data;
        int ret;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        data->handle = sev->handle;
        data->dst_addr = dst;
        data->src_addr = src;
        data->len = size;

        ret = sev_issue_cmd(kvm,
                            enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
                            data, error);
        kfree(data);
        return ret;
}

static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
                             unsigned long dst_paddr, int sz, int *err)
{
        int offset;

        /*
         * It's safe to read more than was asked for; the caller must ensure
         * that the destination has enough space.
         */
        src_paddr = round_down(src_paddr, 16);
        offset = src_paddr & 15;
        sz = round_up(sz + offset, 16);

        return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}

static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
                                  unsigned long __user dst_uaddr,
                                  unsigned long dst_paddr,
                                  int size, int *err)
{
        struct page *tpage = NULL;
        int ret, offset;

        /* If the inputs are not 16-byte aligned, use an intermediate buffer */
        if (!IS_ALIGNED(dst_paddr, 16) ||
            !IS_ALIGNED(paddr, 16) ||
            !IS_ALIGNED(size, 16)) {
                tpage = (void *)alloc_page(GFP_KERNEL);
                if (!tpage)
                        return -ENOMEM;

                dst_paddr = __sme_page_pa(tpage);
        }

        ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
        if (ret)
                goto e_free;

        if (tpage) {
                offset = paddr & 15;
                if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
                                 page_address(tpage) + offset, size))
                        ret = -EFAULT;
        }

e_free:
        if (tpage)
                __free_page(tpage);

        return ret;
}

static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
                                  unsigned long __user vaddr,
                                  unsigned long dst_paddr,
                                  unsigned long __user dst_vaddr,
                                  int size, int *error)
{
        struct page *src_tpage = NULL;
        struct page *dst_tpage = NULL;
        int ret, len = size;

        /* If the source buffer is not aligned, use an intermediate buffer */
        if (!IS_ALIGNED(vaddr, 16)) {
                src_tpage = alloc_page(GFP_KERNEL);
                if (!src_tpage)
                        return -ENOMEM;

                if (copy_from_user(page_address(src_tpage),
                                   (void __user *)(uintptr_t)vaddr, size)) {
                        __free_page(src_tpage);
                        return -EFAULT;
                }

                paddr = __sme_page_pa(src_tpage);
        }

        /*
         * If the destination buffer or the length is not aligned, do a
         * read-modify-write:
         * - decrypt the destination into an intermediate buffer
         * - copy the source buffer into the intermediate buffer
         * - use the intermediate buffer as the source buffer
         */
        if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
                int dst_offset;

                dst_tpage = alloc_page(GFP_KERNEL);
                if (!dst_tpage) {
                        ret = -ENOMEM;
                        goto e_free;
                }

                ret = __sev_dbg_decrypt(kvm, dst_paddr,
                                        __sme_page_pa(dst_tpage), size, error);
                if (ret)
                        goto e_free;

                /*
                 * If the source is a kernel buffer, use memcpy(); otherwise
                 * use copy_from_user().
                 */
                dst_offset = dst_paddr & 15;

                if (src_tpage)
                        memcpy(page_address(dst_tpage) + dst_offset,
                               page_address(src_tpage), size);
                else {
                        if (copy_from_user(page_address(dst_tpage) + dst_offset,
                                           (void __user *)(uintptr_t)vaddr, size)) {
                                ret = -EFAULT;
                                goto e_free;
                        }
                }

                paddr = __sme_page_pa(dst_tpage);
                dst_paddr = round_down(dst_paddr, 16);
                len = round_up(size, 16);
        }

        ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
        if (src_tpage)
                __free_page(src_tpage);
        if (dst_tpage)
                __free_page(dst_tpage);
        return ret;
}

static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
        unsigned long vaddr, vaddr_end, next_vaddr;
        unsigned long dst_vaddr;
        struct page **src_p, **dst_p;
        struct kvm_sev_dbg debug;
        unsigned long n;
        unsigned int size;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
                return -EFAULT;

        if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
                return -EINVAL;
        if (!debug.dst_uaddr)
                return -EINVAL;

        vaddr = debug.src_uaddr;
        size = debug.len;
        vaddr_end = vaddr + size;
        dst_vaddr = debug.dst_uaddr;

        for (; vaddr < vaddr_end; vaddr = next_vaddr) {
                int len, s_off, d_off;

                /* lock userspace source and destination page */
                src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
                if (IS_ERR(src_p))
                        return PTR_ERR(src_p);

                dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
                if (IS_ERR(dst_p)) {
                        sev_unpin_memory(kvm, src_p, n);
                        return PTR_ERR(dst_p);
                }

                /*
                 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
                 * the pages; flush the destination too so that future accesses do not
                 * see stale data.
                 */
                sev_clflush_pages(src_p, 1);
                sev_clflush_pages(dst_p, 1);

                /*
                 * Since the user buffers may not be page-aligned, calculate
                 * the offset within the page.
                 */
                s_off = vaddr & ~PAGE_MASK;
                d_off = dst_vaddr & ~PAGE_MASK;
                len = min_t(size_t, (PAGE_SIZE - s_off), size);

                if (dec)
                        ret = __sev_dbg_decrypt_user(kvm,
                                                     __sme_page_pa(src_p[0]) + s_off,
                                                     dst_vaddr,
                                                     __sme_page_pa(dst_p[0]) + d_off,
                                                     len, &argp->error);
                else
                        ret = __sev_dbg_encrypt_user(kvm,
                                                     __sme_page_pa(src_p[0]) + s_off,
                                                     vaddr,
                                                     __sme_page_pa(dst_p[0]) + d_off,
                                                     dst_vaddr,
                                                     len, &argp->error);

                sev_unpin_memory(kvm, src_p, n);
                sev_unpin_memory(kvm, dst_p, n);

                if (ret)
                        goto err;

                next_vaddr = vaddr + len;
                dst_vaddr = dst_vaddr + len;
                size -= len;
        }
err:
        return ret;
}

static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_secret *data;
        struct kvm_sev_launch_secret params;
        struct page **pages;
        void *blob, *hdr;
        unsigned long n, i;
        int ret, offset;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
                return -EFAULT;

        pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /*
         * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
         * place; the cache may contain the data that was written unencrypted.
         */
        sev_clflush_pages(pages, n);

        /*
         * The secret must be copied into a contiguous memory region, so
         * verify that the userspace memory pages are contiguous before
         * issuing the command.
         */
        if (get_num_contig_pages(0, pages, n) != n) {
                ret = -EINVAL;
                goto e_unpin_memory;
        }

        ret = -ENOMEM;
        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                goto e_unpin_memory;

        offset = params.guest_uaddr & (PAGE_SIZE - 1);
        data->guest_address = __sme_page_pa(pages[0]) + offset;
        data->guest_len = params.guest_len;

        blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
        if (IS_ERR(blob)) {
                ret = PTR_ERR(blob);
                goto e_free;
        }

        data->trans_address = __psp_pa(blob);
        data->trans_len = params.trans_len;

        hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
        if (IS_ERR(hdr)) {
                ret = PTR_ERR(hdr);
                goto e_free_blob;
        }
        data->hdr_address = __psp_pa(hdr);
        data->hdr_len = params.hdr_len;

        data->handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);

        kfree(hdr);

e_free_blob:
        kfree(blob);
e_free:
        kfree(data);
e_unpin_memory:
        /* content of memory is updated, mark pages dirty */
        for (i = 0; i < n; i++) {
                set_page_dirty_lock(pages[i]);
                mark_page_accessed(pages[i]);
        }
        sev_unpin_memory(kvm, pages, n);
        return ret;
}

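/*
 * Entry point for the KVM_MEMORY_ENCRYPT_OP ioctl.  A rough sketch of the
 * userspace launch flow (illustrative only, error handling omitted):
 *
 *        struct kvm_sev_cmd cmd = { .id = KVM_SEV_INIT, .sev_fd = sev_fd };
 *        ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 *        ... then KVM_SEV_LAUNCH_START, KVM_SEV_LAUNCH_UPDATE_DATA,
 *        KVM_SEV_LAUNCH_MEASURE and KVM_SEV_LAUNCH_FINISH, checking
 *        cmd.error after each step.
 */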
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
        struct kvm_sev_cmd sev_cmd;
        int r;

        if (!svm_sev_enabled() || !sev)
                return -ENOTTY;

        if (!argp)
                return 0;

        if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
                return -EFAULT;

        mutex_lock(&kvm->lock);

        switch (sev_cmd.id) {
        case KVM_SEV_INIT:
                r = sev_guest_init(kvm, &sev_cmd);
                break;
        case KVM_SEV_LAUNCH_START:
                r = sev_launch_start(kvm, &sev_cmd);
                break;
        case KVM_SEV_LAUNCH_UPDATE_DATA:
                r = sev_launch_update_data(kvm, &sev_cmd);
                break;
        case KVM_SEV_LAUNCH_MEASURE:
                r = sev_launch_measure(kvm, &sev_cmd);
                break;
        case KVM_SEV_LAUNCH_FINISH:
                r = sev_launch_finish(kvm, &sev_cmd);
                break;
        case KVM_SEV_GUEST_STATUS:
                r = sev_guest_status(kvm, &sev_cmd);
                break;
        case KVM_SEV_DBG_DECRYPT:
                r = sev_dbg_crypt(kvm, &sev_cmd, true);
                break;
        case KVM_SEV_DBG_ENCRYPT:
                r = sev_dbg_crypt(kvm, &sev_cmd, false);
                break;
        case KVM_SEV_LAUNCH_SECRET:
                r = sev_launch_secret(kvm, &sev_cmd);
                break;
        default:
                r = -EINVAL;
                goto out;
        }

        if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
                r = -EFAULT;

out:
        mutex_unlock(&kvm->lock);
        return r;
}

int svm_register_enc_region(struct kvm *kvm,
                            struct kvm_enc_region *range)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct enc_region *region;
        int ret = 0;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
                return -EINVAL;

        region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
        if (!region)
                return -ENOMEM;

        region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
        if (IS_ERR(region->pages)) {
                ret = PTR_ERR(region->pages);
                goto e_free;
        }

        /*
         * The guest may change the memory encryption attribute from C=0 -> C=1
         * or vice versa for this memory range. Make sure the caches are
         * flushed so that guest data gets written into memory with the
         * correct C-bit.
         */
        sev_clflush_pages(region->pages, region->npages);

        region->uaddr = range->addr;
        region->size = range->size;

        mutex_lock(&kvm->lock);
        list_add_tail(&region->list, &sev->regions_list);
        mutex_unlock(&kvm->lock);

        return ret;

e_free:
        kfree(region);
        return ret;
}

static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct list_head *head = &sev->regions_list;
        struct enc_region *i;

        list_for_each_entry(i, head, list) {
                if (i->uaddr == range->addr &&
                    i->size == range->size)
                        return i;
        }

        return NULL;
}

static void __unregister_enc_region_locked(struct kvm *kvm,
                                           struct enc_region *region)
{
        sev_unpin_memory(kvm, region->pages, region->npages);
        list_del(&region->list);
        kfree(region);
}

int svm_unregister_enc_region(struct kvm *kvm,
                              struct kvm_enc_region *range)
{
        struct enc_region *region;
        int ret;

        mutex_lock(&kvm->lock);

        if (!sev_guest(kvm)) {
                ret = -ENOTTY;
                goto failed;
        }

        region = find_enc_region(kvm, range);
        if (!region) {
                ret = -EINVAL;
                goto failed;
        }

        /*
         * Ensure that all guest tagged cache entries are flushed before
         * releasing the pages back to the system for use. CLFLUSH will
         * not do this, so issue a WBINVD.
         */
        wbinvd_on_all_cpus();

        __unregister_enc_region_locked(kvm, region);

        mutex_unlock(&kvm->lock);
        return 0;

failed:
        mutex_unlock(&kvm->lock);
        return ret;
}

void sev_vm_destroy(struct kvm *kvm)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct list_head *head = &sev->regions_list;
        struct list_head *pos, *q;

        if (!sev_guest(kvm))
                return;

        mutex_lock(&kvm->lock);

        /*
         * Ensure that all guest tagged cache entries are flushed before
         * releasing the pages back to the system for use. CLFLUSH will
         * not do this, so issue a WBINVD.
         */
        wbinvd_on_all_cpus();

        /*
         * If userspace was terminated before unregistering the memory
         * regions, unpin all of the registered memory.
         */
        if (!list_empty(head)) {
                list_for_each_safe(pos, q, head) {
                        __unregister_enc_region_locked(kvm,
                                                       list_entry(pos, struct enc_region, list));
                        cond_resched();
                }
        }

        mutex_unlock(&kvm->lock);

        sev_unbind_asid(kvm, sev->handle);
        sev_asid_free(sev->asid);
}

void __init sev_hardware_setup(void)
{
        unsigned int eax, ebx, ecx, edx;
        bool sev_es_supported = false;
        bool sev_supported = false;

        /* Does the CPU support SEV? */
        if (!boot_cpu_has(X86_FEATURE_SEV))
                goto out;

        /* Retrieve SEV CPUID information */
        cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);

        /* Set encryption bit location for SEV-ES guests */
        sev_enc_bit = ebx & 0x3f;

        /* Maximum number of encrypted guests supported simultaneously */
        max_sev_asid = ecx;

        if (!svm_sev_enabled())
                goto out;

        /* Minimum ASID value that should be used for SEV guest */
        min_sev_asid = edx;

        /* Initialize SEV ASID bitmaps */
        sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
        if (!sev_asid_bitmap)
                goto out;

        sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
        if (!sev_reclaim_asid_bitmap)
                goto out;

        pr_info("SEV supported: %u ASIDs\n", max_sev_asid - min_sev_asid + 1);
        sev_supported = true;

        /* SEV-ES support requested? */
        if (!sev_es)
                goto out;

        /* Does the CPU support SEV-ES? */
        if (!boot_cpu_has(X86_FEATURE_SEV_ES))
                goto out;

        /* Has the system been allocated ASIDs for SEV-ES? */
        if (min_sev_asid == 1)
                goto out;

        pr_info("SEV-ES supported: %u ASIDs\n", min_sev_asid - 1);
        sev_es_supported = true;

out:
        sev = sev_supported;
        sev_es = sev_es_supported;
}

void sev_hardware_teardown(void)
{
        if (!svm_sev_enabled())
                return;

        bitmap_free(sev_asid_bitmap);
        bitmap_free(sev_reclaim_asid_bitmap);

        sev_flush_asids();
}

/*
 * Pages used by hardware to hold guest encrypted state must be flushed before
 * returning them to the system.
 */
static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
                                   unsigned long len)
{
        /*
         * If hardware enforced cache coherency for encrypted mappings of the
         * same physical page is supported, nothing to do.
         */
        if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
                return;

        /*
         * If the VM Page Flush MSR is supported, use it to flush the page
         * (using the page virtual address and the guest ASID).
         */
        if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
                struct kvm_sev_info *sev;
                unsigned long va_start;
                u64 start, stop;

                /* Align start and stop to page boundaries. */
                va_start = (unsigned long)va;
                start = (u64)va_start & PAGE_MASK;
                stop = PAGE_ALIGN((u64)va_start + len);

                if (start < stop) {
                        sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;

                        while (start < stop) {
                                wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
                                       start | sev->asid);

                                start += PAGE_SIZE;
                        }

                        return;
                }

                WARN(1, "Address overflow, using WBINVD\n");
        }

        /*
         * Hardware should always have one of the above features,
         * but if not, use WBINVD and issue a warning.
         */
        WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
        wbinvd_on_all_cpus();
}

void sev_free_vcpu(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm;

        if (!sev_es_guest(vcpu->kvm))
                return;

        svm = to_svm(vcpu);

        if (vcpu->arch.guest_state_protected)
                sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
        __free_page(virt_to_page(svm->vmsa));

        if (svm->ghcb_sa_free)
                kfree(svm->ghcb_sa);
}

static void dump_ghcb(struct vcpu_svm *svm)
{
        struct ghcb *ghcb = svm->ghcb;
        unsigned int nbits;

        /* Re-use the dump_invalid_vmcb module parameter */
        if (!dump_invalid_vmcb) {
                pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
                return;
        }

        nbits = sizeof(ghcb->save.valid_bitmap) * 8;

        pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
        pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
               ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
        pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
               ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
        pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
               ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
        pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
               ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
        pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
}

static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        struct ghcb *ghcb = svm->ghcb;

        /*
         * The GHCB protocol so far allows for the following data
         * to be returned:
         *   GPRs RAX, RBX, RCX, RDX
         *
         * Copy their values to the GHCB if they are dirty.
         */
        if (kvm_register_is_dirty(vcpu, VCPU_REGS_RAX))
                ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
        if (kvm_register_is_dirty(vcpu, VCPU_REGS_RBX))
                ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
        if (kvm_register_is_dirty(vcpu, VCPU_REGS_RCX))
                ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
        if (kvm_register_is_dirty(vcpu, VCPU_REGS_RDX))
                ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
}

static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;
        struct kvm_vcpu *vcpu = &svm->vcpu;
        struct ghcb *ghcb = svm->ghcb;
        u64 exit_code;

        /*
         * The GHCB protocol so far allows for the following data
         * to be supplied:
         *   GPRs RAX, RBX, RCX, RDX
         *   XCR0
         *   CPL
         *
         * VMMCALL allows the guest to provide extra registers. KVM also
         * expects RSI for hypercalls, so include that, too.
         *
         * Copy their values to the appropriate location if supplied.
         */
        memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));

        vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
        vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
        vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
        vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
        vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);

        svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);

        if (ghcb_xcr0_is_valid(ghcb)) {
                vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
                kvm_update_cpuid_runtime(vcpu);
        }

        /* Copy the GHCB exit information into the VMCB fields */
        exit_code = ghcb_get_sw_exit_code(ghcb);
        control->exit_code = lower_32_bits(exit_code);
        control->exit_code_hi = upper_32_bits(exit_code);
        control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
        control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);

        /* Clear the valid entries fields */
        memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}

static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
        struct kvm_vcpu *vcpu;
        struct ghcb *ghcb;
        u64 exit_code = 0;

        ghcb = svm->ghcb;

        /* Only GHCB Usage code 0 is supported */
        if (ghcb->ghcb_usage)
                goto vmgexit_err;

        /*
         * Retrieve the exit code now even though it may not be marked valid,
         * as it could help with debugging.
         */
        exit_code = ghcb_get_sw_exit_code(ghcb);

        if (!ghcb_sw_exit_code_is_valid(ghcb) ||
            !ghcb_sw_exit_info_1_is_valid(ghcb) ||
            !ghcb_sw_exit_info_2_is_valid(ghcb))
                goto vmgexit_err;

        switch (ghcb_get_sw_exit_code(ghcb)) {
        case SVM_EXIT_READ_DR7:
                break;
        case SVM_EXIT_WRITE_DR7:
                if (!ghcb_rax_is_valid(ghcb))
                        goto vmgexit_err;
                break;
        case SVM_EXIT_RDTSC:
                break;
        case SVM_EXIT_RDPMC:
                if (!ghcb_rcx_is_valid(ghcb))
                        goto vmgexit_err;
                break;
        case SVM_EXIT_CPUID:
                if (!ghcb_rax_is_valid(ghcb) ||
                    !ghcb_rcx_is_valid(ghcb))
                        goto vmgexit_err;
                if (ghcb_get_rax(ghcb) == 0xd)
                        if (!ghcb_xcr0_is_valid(ghcb))
                                goto vmgexit_err;
                break;
        case SVM_EXIT_INVD:
                break;
        case SVM_EXIT_IOIO:
                if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
                        if (!ghcb_sw_scratch_is_valid(ghcb))
                                goto vmgexit_err;
                } else {
                        if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
                                if (!ghcb_rax_is_valid(ghcb))
                                        goto vmgexit_err;
                }
                break;
        case SVM_EXIT_MSR:
                if (!ghcb_rcx_is_valid(ghcb))
                        goto vmgexit_err;
                if (ghcb_get_sw_exit_info_1(ghcb)) {
                        if (!ghcb_rax_is_valid(ghcb) ||
                            !ghcb_rdx_is_valid(ghcb))
                                goto vmgexit_err;
                }
                break;
        case SVM_EXIT_VMMCALL:
                if (!ghcb_rax_is_valid(ghcb) ||
                    !ghcb_cpl_is_valid(ghcb))
                        goto vmgexit_err;
                break;
        case SVM_EXIT_RDTSCP:
                break;
        case SVM_EXIT_WBINVD:
                break;
        case SVM_EXIT_MONITOR:
                if (!ghcb_rax_is_valid(ghcb) ||
                    !ghcb_rcx_is_valid(ghcb) ||
                    !ghcb_rdx_is_valid(ghcb))
                        goto vmgexit_err;
                break;
        case SVM_EXIT_MWAIT:
                if (!ghcb_rax_is_valid(ghcb) ||
                    !ghcb_rcx_is_valid(ghcb))
                        goto vmgexit_err;
                break;
        case SVM_VMGEXIT_MMIO_READ:
        case SVM_VMGEXIT_MMIO_WRITE:
                if (!ghcb_sw_scratch_is_valid(ghcb))
                        goto vmgexit_err;
                break;
        case SVM_VMGEXIT_NMI_COMPLETE:
        case SVM_VMGEXIT_UNSUPPORTED_EVENT:
                break;
        default:
                goto vmgexit_err;
        }

        return 0;

vmgexit_err:
        vcpu = &svm->vcpu;

        if (ghcb->ghcb_usage) {
                vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
                            ghcb->ghcb_usage);
        } else {
                vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
                            exit_code);
                dump_ghcb(svm);
        }

        vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
        vcpu->run->internal.ndata = 2;
        vcpu->run->internal.data[0] = exit_code;
        vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;

        return -EINVAL;
}

static void pre_sev_es_run(struct vcpu_svm *svm)
{
        if (!svm->ghcb)
                return;

        if (svm->ghcb_sa_free) {
                /*
                 * The scratch area lives outside the GHCB, so there is a
                 * buffer that, depending on the operation performed, may
                 * need to be synced, then freed.
                 */
                if (svm->ghcb_sa_sync) {
                        kvm_write_guest(svm->vcpu.kvm,
                                        ghcb_get_sw_scratch(svm->ghcb),
                                        svm->ghcb_sa, svm->ghcb_sa_len);
                        svm->ghcb_sa_sync = false;
                }

                kfree(svm->ghcb_sa);
                svm->ghcb_sa = NULL;
                svm->ghcb_sa_free = false;
        }

        trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);

        sev_es_sync_to_ghcb(svm);

        kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
        svm->ghcb = NULL;
}

void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
        int asid = sev_get_asid(svm->vcpu.kvm);

        /* Perform any SEV-ES pre-run actions */
        pre_sev_es_run(svm);

        /* Assign the asid allocated with this SEV guest */
        svm->asid = asid;

        /*
         * Flush guest TLB:
         *
         * 1) when different VMCB for the same ASID is to be run on the same host CPU.
         * 2) or this VMCB was executed on different host CPU in previous VMRUNs.
         */
        if (sd->sev_vmcbs[asid] == svm->vmcb &&
            svm->vcpu.arch.last_vmentry_cpu == cpu)
                return;

        sd->sev_vmcbs[asid] = svm->vmcb;
        svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
        vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
}

#define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
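/*
 * Map the guest-supplied scratch area: either a slice of the GHCB shared
 * buffer, which can be used in place, or a separate guest buffer that must
 * be copied into a kernel allocation (and possibly synced back later).
 */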
static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
{
        struct vmcb_control_area *control = &svm->vmcb->control;
        struct ghcb *ghcb = svm->ghcb;
        u64 ghcb_scratch_beg, ghcb_scratch_end;
        u64 scratch_gpa_beg, scratch_gpa_end;
        void *scratch_va;

        scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
        if (!scratch_gpa_beg) {
                pr_err("vmgexit: scratch gpa not provided\n");
                return false;
        }

        scratch_gpa_end = scratch_gpa_beg + len;
        if (scratch_gpa_end < scratch_gpa_beg) {
                pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
                       len, scratch_gpa_beg);
                return false;
        }

        if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
                /* Scratch area begins within GHCB */
                ghcb_scratch_beg = control->ghcb_gpa +
                                   offsetof(struct ghcb, shared_buffer);
                ghcb_scratch_end = control->ghcb_gpa +
                                   offsetof(struct ghcb, reserved_1);

                /*
                 * If the scratch area begins within the GHCB, it must be
                 * completely contained in the GHCB shared buffer area.
                 */
                if (scratch_gpa_beg < ghcb_scratch_beg ||
                    scratch_gpa_end > ghcb_scratch_end) {
                        pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
                               scratch_gpa_beg, scratch_gpa_end);
                        return false;
                }

                scratch_va = (void *)svm->ghcb;
                scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
        } else {
                /*
                 * The guest memory must be read into a kernel buffer, so
                 * limit the size
                 */
                if (len > GHCB_SCRATCH_AREA_LIMIT) {
                        pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
                               len, GHCB_SCRATCH_AREA_LIMIT);
                        return false;
                }
                scratch_va = kzalloc(len, GFP_KERNEL);
                if (!scratch_va)
                        return false;

                if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
                        /* Unable to copy scratch area from guest */
                        pr_err("vmgexit: kvm_read_guest for scratch area failed\n");

                        kfree(scratch_va);
                        return false;
                }

                /*
                 * The scratch area is outside the GHCB. The operation will
                 * dictate whether the buffer needs to be synced before running
                 * the vCPU next time (i.e. a read was requested so the data
                 * must be written back to the guest memory).
                 */
                svm->ghcb_sa_sync = sync;
                svm->ghcb_sa_free = true;
        }

        svm->ghcb_sa = scratch_va;
        svm->ghcb_sa_len = len;

        return true;
}

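/*
 * Helpers for the GHCB MSR protocol, which packs request and response
 * fields into the GHCB GPA value that is exchanged through the GHCB MSR.
 */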
static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
                              unsigned int pos)
{
        svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
        svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
}

static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
{
        return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
}

static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
{
        svm->vmcb->control.ghcb_gpa = value;
}

static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;
        struct kvm_vcpu *vcpu = &svm->vcpu;
        u64 ghcb_info;
        int ret = 1;

        ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;

        trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
                                             control->ghcb_gpa);

        switch (ghcb_info) {
        case GHCB_MSR_SEV_INFO_REQ:
                set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
                                                    GHCB_VERSION_MIN,
                                                    sev_enc_bit));
                break;
        case GHCB_MSR_CPUID_REQ: {
                u64 cpuid_fn, cpuid_reg, cpuid_value;

                cpuid_fn = get_ghcb_msr_bits(svm,
                                             GHCB_MSR_CPUID_FUNC_MASK,
                                             GHCB_MSR_CPUID_FUNC_POS);

                /* Initialize the registers needed by the CPUID intercept */
                vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
                vcpu->arch.regs[VCPU_REGS_RCX] = 0;

                ret = svm_invoke_exit_handler(svm, SVM_EXIT_CPUID);
                if (!ret) {
                        ret = -EINVAL;
                        break;
                }

                cpuid_reg = get_ghcb_msr_bits(svm,
                                              GHCB_MSR_CPUID_REG_MASK,
                                              GHCB_MSR_CPUID_REG_POS);
                if (cpuid_reg == 0)
                        cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
                else if (cpuid_reg == 1)
                        cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
                else if (cpuid_reg == 2)
                        cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
                else
                        cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];

                set_ghcb_msr_bits(svm, cpuid_value,
                                  GHCB_MSR_CPUID_VALUE_MASK,
                                  GHCB_MSR_CPUID_VALUE_POS);

                set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
                                  GHCB_MSR_INFO_MASK,
                                  GHCB_MSR_INFO_POS);
                break;
        }
        case GHCB_MSR_TERM_REQ: {
                u64 reason_set, reason_code;

                reason_set = get_ghcb_msr_bits(svm,
                                               GHCB_MSR_TERM_REASON_SET_MASK,
                                               GHCB_MSR_TERM_REASON_SET_POS);
                reason_code = get_ghcb_msr_bits(svm,
                                                GHCB_MSR_TERM_REASON_MASK,
                                                GHCB_MSR_TERM_REASON_POS);
                pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
                        reason_set, reason_code);
                fallthrough;
        }
        default:
                ret = -EINVAL;
        }

        trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
                                            control->ghcb_gpa, ret);

        return ret;
}

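/*
 * VMGEXIT handler: dispatch to the MSR-based GHCB protocol if the GHCB GPA
 * carries protocol information bits, otherwise map and validate the GHCB
 * and handle the requested exit code.
 */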
int sev_handle_vmgexit(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;
        u64 ghcb_gpa, exit_code;
        struct ghcb *ghcb;
        int ret;

        /* Validate the GHCB */
        ghcb_gpa = control->ghcb_gpa;
        if (ghcb_gpa & GHCB_MSR_INFO_MASK)
                return sev_handle_vmgexit_msr_protocol(svm);

        if (!ghcb_gpa) {
                vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB gpa is not set\n");
                return -EINVAL;
        }

        if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
                /* Unable to map GHCB from guest */
                vcpu_unimpl(&svm->vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
                            ghcb_gpa);
                return -EINVAL;
        }

        svm->ghcb = svm->ghcb_map.hva;
        ghcb = svm->ghcb_map.hva;

        trace_kvm_vmgexit_enter(svm->vcpu.vcpu_id, ghcb);

        exit_code = ghcb_get_sw_exit_code(ghcb);

        ret = sev_es_validate_vmgexit(svm);
        if (ret)
                return ret;

        sev_es_sync_from_ghcb(svm);
        ghcb_set_sw_exit_info_1(ghcb, 0);
        ghcb_set_sw_exit_info_2(ghcb, 0);

        ret = -EINVAL;
        switch (exit_code) {
        case SVM_VMGEXIT_MMIO_READ:
                if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
                        break;

                ret = kvm_sev_es_mmio_read(&svm->vcpu,
                                           control->exit_info_1,
                                           control->exit_info_2,
                                           svm->ghcb_sa);
                break;
        case SVM_VMGEXIT_MMIO_WRITE:
                if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
                        break;

                ret = kvm_sev_es_mmio_write(&svm->vcpu,
                                            control->exit_info_1,
                                            control->exit_info_2,
                                            svm->ghcb_sa);
                break;
        case SVM_VMGEXIT_NMI_COMPLETE:
                ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
                break;
        case SVM_VMGEXIT_UNSUPPORTED_EVENT:
                vcpu_unimpl(&svm->vcpu,
                            "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
                            control->exit_info_1, control->exit_info_2);
                break;
        default:
                ret = svm_invoke_exit_handler(svm, exit_code);
        }

        return ret;
}

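/* Handle string I/O (INS/OUTS) for an SEV-ES guest via the GHCB scratch area. */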
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
{
        if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
                return -EINVAL;

        return kvm_sev_es_string_io(&svm->vcpu, size, port,
                                    svm->ghcb_sa, svm->ghcb_sa_len, in);
}

void sev_es_init_vmcb(struct vcpu_svm *svm)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;

        svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
        svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;

        /*
         * An SEV-ES guest requires a VMSA area that is separate from the
         * VMCB page. Do not include the encryption mask on the VMSA physical
         * address since hardware will access it using the guest key.
         */
        svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);

        /* Can't intercept CR register access, HV can't modify CR registers */
        svm_clr_intercept(svm, INTERCEPT_CR0_READ);
        svm_clr_intercept(svm, INTERCEPT_CR4_READ);
        svm_clr_intercept(svm, INTERCEPT_CR8_READ);
        svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
        svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
        svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);

        svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);

        /* Track EFER/CR register changes */
        svm_set_intercept(svm, TRAP_EFER_WRITE);
        svm_set_intercept(svm, TRAP_CR0_WRITE);
        svm_set_intercept(svm, TRAP_CR4_WRITE);
        svm_set_intercept(svm, TRAP_CR8_WRITE);

        /* No support for enable_vmware_backdoor */
        clr_exception_intercept(svm, GP_VECTOR);

        /* Can't intercept XSETBV, HV can't modify XCR0 directly */
        svm_clr_intercept(svm, INTERCEPT_XSETBV);

        /* Clear intercepts on selected MSRs */
        set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

void sev_es_create_vcpu(struct vcpu_svm *svm)
{
        /*
         * Set the GHCB MSR value as per the GHCB specification when creating
         * a vCPU for an SEV-ES guest.
         */
        set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
                                            GHCB_VERSION_MIN,
                                            sev_enc_bit));
}