// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include "x86.h"
#include "svm.h"

static int sev_flush_asids(void);
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

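/*
 * A userspace memory range registered for encryption via
 * svm_register_enc_region(); the backing pages stay pinned for the
 * lifetime of the region.
 */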
struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

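/*
 * Flush all CPU caches and then issue a firmware DF_FLUSH so that ASIDs
 * marked for reclaim can be handed out again.
 */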
static int sev_flush_asids(void)
{
	int ret, error = 0;

	/*
	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
	 * so it must be guarded.
	 */
	down_write(&sev_deactivate_lock);

	wbinvd_on_all_cpus();
	ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return ret;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(void)
{
	int pos;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	pos = find_next_bit(sev_reclaim_asid_bitmap,
			    max_sev_asid, min_sev_asid - 1);
	if (pos >= max_sev_asid)
		return false;

	if (sev_flush_asids())
		return false;

	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
		   max_sev_asid);
	bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);

	return true;
}

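/*
 * Allocate an unused SEV ASID, recycling reclaimed ASIDs once if the
 * bitmap is exhausted.  Returns the ASID (1-based) on success or -EBUSY
 * if none are available.
 */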
static int sev_asid_new(void)
{
	bool retry = true;
	int pos;

	mutex_lock(&sev_bitmap_lock);

	/*
	 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
	 */
again:
	pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
	if (pos >= max_sev_asid) {
		if (retry && __sev_recycle_asids()) {
			retry = false;
			goto again;
		}
		mutex_unlock(&sev_bitmap_lock);
		return -EBUSY;
	}

	__set_bit(pos, sev_asid_bitmap);

	mutex_unlock(&sev_bitmap_lock);

	return pos + 1;
}

static int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->asid;
}

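/*
 * Mark the ASID as reclaimable and drop the cached per-CPU VMCB pointers
 * for it.  The ASID is only returned to the allocation pool after the
 * next flush in __sev_recycle_asids().
 */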
static void sev_asid_free(int asid)
{
	struct svm_cpu_data *sd;
	int cpu, pos;

	mutex_lock(&sev_bitmap_lock);

	pos = asid - 1;
	__set_bit(pos, sev_reclaim_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu(svm_data, cpu);
		sd->sev_vmcbs[pos] = NULL;
	}

	mutex_unlock(&sev_bitmap_lock);
}

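/*
 * DEACTIVATE the firmware handle to detach it from its ASID, then
 * DECOMMISSION it so the firmware releases the guest context.
 */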
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_decommission *decommission;
	struct sev_data_deactivate *data;

	if (!handle)
		return;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	/* deactivate handle */
	data->handle = handle;

	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(data, NULL);
	up_read(&sev_deactivate_lock);

	kfree(data);

	decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
	if (!decommission)
		return;

	/* decommission handle */
	decommission->handle = handle;
	sev_guest_decommission(decommission, NULL);

	kfree(decommission);
}

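/*
 * KVM_SEV_INIT: allocate an ASID for this VM and initialize the SEV
 * platform; the ASID is released again if platform init fails.
 */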
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	int asid, ret;

	ret = -EBUSY;
	if (unlikely(sev->active))
		return ret;

	asid = sev_asid_new();
	if (asid < 0)
		return ret;

	ret = sev_platform_init(&argp->error);
	if (ret)
		goto e_free;

	sev->active = true;
	sev->asid = asid;
	INIT_LIST_HEAD(&sev->regions_list);

	return 0;

e_free:
	sev_asid_free(asid);
	return ret;
}

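/*
 * Bind the firmware handle to the ASID allocated for this VM via the
 * ACTIVATE command.
 */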
static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
	struct sev_data_activate *data;
	int asid = sev_get_asid(kvm);
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	/* activate ASID on the given handle */
	data->handle = handle;
	data->asid = asid;
	ret = sev_guest_activate(data, error);
	kfree(data);

	return ret;
}

static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = sev_issue_cmd_external_user(f.file, id, data, error);

	fdput(f);
	return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return __sev_issue_cmd(sev->fd, id, data, error);
}

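/*
 * KVM_SEV_LAUNCH_START: create the memory encryption context with the
 * firmware (optionally passing the guest owner's DH certificate and
 * session blob) and bind the returned handle to this VM's ASID.
 */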
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_start *start;
	struct kvm_sev_launch_start params;
	void *dh_blob, *session_blob;
	int *error = &argp->error;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
	if (!start)
		return -ENOMEM;

	dh_blob = NULL;
	if (params.dh_uaddr) {
		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
		if (IS_ERR(dh_blob)) {
			ret = PTR_ERR(dh_blob);
			goto e_free;
		}

		start->dh_cert_address = __sme_set(__pa(dh_blob));
		start->dh_cert_len = params.dh_len;
	}

	session_blob = NULL;
	if (params.session_uaddr) {
		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
		if (IS_ERR(session_blob)) {
			ret = PTR_ERR(session_blob);
			goto e_free_dh;
		}

		start->session_address = __sme_set(__pa(session_blob));
		start->session_len = params.session_len;
	}

	start->handle = params.handle;
	start->policy = params.policy;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start->handle, error);
	if (ret)
		goto e_free_session;

	/* return handle to userspace */
	params.handle = start->handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
		sev_unbind_asid(kvm, start->handle);
		ret = -EFAULT;
		goto e_free_session;
	}

	sev->handle = start->handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_blob);
e_free_dh:
	kfree(dh_blob);
e_free:
	kfree(start);
	return ret;
}

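/*
 * Pin a userspace range with get_user_pages_fast() and charge it against
 * the task's RLIMIT_MEMLOCK.  Returns the pinned page array (kmalloc or
 * vmalloc backed) or NULL on failure.
 */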
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	unsigned long npages, size;
	int npinned;
	unsigned long locked, lock_limit;
	struct page **pages;
	unsigned long first, last;

	if (ulen == 0 || uaddr + ulen < uaddr)
		return NULL;

	/* Calculate number of pages. */
	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return NULL;
	}

	if (WARN_ON_ONCE(npages > INT_MAX))
		return NULL;

	/* Avoid using vmalloc for smaller buffers. */
	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	else
		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

	if (!pages)
		return NULL;

	/* Pin the user virtual address. */
	npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);
		goto err;
	}

	*n = npages;
	sev->pages_locked = locked;

	return pages;

err:
	if (npinned > 0)
		release_pages(pages, npinned);

	kvfree(pages);
	return NULL;
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	release_pages(pages, npages);
	kvfree(pages);
	sev->pages_locked -= npages;
}

static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
	uint8_t *page_virtual;
	unsigned long i;

	if (npages == 0 || pages == NULL)
		return;

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_atomic(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
}

static unsigned long get_num_contig_pages(unsigned long idx,
					  struct page **inpages, unsigned long npages)
{
	unsigned long paddr, next_paddr;
	unsigned long i = idx + 1, pages = 1;

	/* find the number of contiguous pages starting from idx */
	paddr = __sme_page_pa(inpages[idx]);
	while (i < npages) {
		next_paddr = __sme_page_pa(inpages[i++]);
		if ((paddr + PAGE_SIZE) == next_paddr) {
			pages++;
			paddr = next_paddr;
			continue;
		}
		break;
	}

	return pages;
}

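/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: pin the user buffer, flush its cache lines
 * and have the firmware encrypt it in place, walking the pinned pages in
 * physically contiguous chunks.
 */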
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_launch_update_data params;
	struct sev_data_launch_update_data *data;
	struct page **inpages;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	vaddr = params.uaddr;
	size = params.len;
	vaddr_end = vaddr + size;

	/* Lock the user memory. */
	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
	if (!inpages) {
		ret = -ENOMEM;
		goto e_free;
	}

	/*
	 * The LAUNCH_UPDATE command will perform in-place encryption of the
	 * memory content (i.e. it will write the same memory region with C=1).
	 * It's possible that the cache may contain the data with C=0, i.e.,
	 * unencrypted, so invalidate it first.
	 */
	sev_clflush_pages(inpages, npages);

	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
		int offset, len;

		/*
		 * If the user buffer is not page-aligned, calculate the offset
		 * within the page.
		 */
		offset = vaddr & (PAGE_SIZE - 1);

		/* Calculate the number of pages that can be encrypted in one go. */
		pages = get_num_contig_pages(i, inpages, npages);

		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

		data->handle = sev->handle;
		data->len = len;
		data->address = __sme_page_pa(inpages[i]) + offset;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
		if (ret)
			goto e_unpin;

		size -= len;
		next_vaddr = vaddr + len;
	}

e_unpin:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(inpages[i]);
		mark_page_accessed(inpages[i]);
	}
	/* unlock the user pages */
	sev_unpin_memory(kvm, inpages, npages);
e_free:
	kfree(data);
	return ret;
}

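/*
 * KVM_SEV_LAUNCH_MEASURE: retrieve the launch measurement from the
 * firmware.  With params.len == 0 only the required blob length is
 * reported back to userspace.
 */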
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *measure = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_measure *data;
	struct kvm_sev_launch_measure params;
	void __user *p = NULL;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, measure, sizeof(params)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE) {
			ret = -EINVAL;
			goto e_free;
		}

		ret = -ENOMEM;
		blob = kmalloc(params.len, GFP_KERNEL);
		if (!blob)
			goto e_free;

		data->address = __psp_pa(blob);
		data->len = params.len;
	}

cmd:
	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);

	/*
	 * If we only queried the measurement blob length, the firmware has
	 * filled in the expected length, so report it back to userspace.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data->len;
	if (copy_to_user(measure, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
e_free:
	kfree(data);
	return ret;
}

static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_finish *data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);

	kfree(data);
	return ret;
}

static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_guest_status params;
	struct sev_data_guest_status *data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
	if (ret)
		goto e_free;

	params.policy = data->policy;
	params.state = data->state;
	params.handle = data->handle;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
		ret = -EFAULT;
e_free:
	kfree(data);
	return ret;
}

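/*
 * Helper for the debug interface: issue a single DBG_ENCRYPT or
 * DBG_DECRYPT firmware command on a physically addressed source and
 * destination.
 */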
static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
			       unsigned long dst, int size,
			       int *error, bool enc)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_dbg *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	data->dst_addr = dst;
	data->src_addr = src;
	data->len = size;

	ret = sev_issue_cmd(kvm,
			    enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
			    data, error);
	kfree(data);
	return ret;
}

static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
			     unsigned long dst_paddr, int sz, int *err)
{
	int offset;

	/*
	 * It's safe to read more than we are asked for; the caller must
	 * ensure that the destination has enough space.
	 */
	src_paddr = round_down(src_paddr, 16);
	offset = src_paddr & 15;
	sz = round_up(sz + offset, 16);

	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}

static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
				  unsigned long __user dst_uaddr,
				  unsigned long dst_paddr,
				  int size, int *err)
{
	struct page *tpage = NULL;
	int ret, offset;

	/* If the inputs are not 16-byte aligned then use an intermediate buffer */
	if (!IS_ALIGNED(dst_paddr, 16) ||
	    !IS_ALIGNED(paddr, 16) ||
	    !IS_ALIGNED(size, 16)) {
		tpage = (void *)alloc_page(GFP_KERNEL);
		if (!tpage)
			return -ENOMEM;

		dst_paddr = __sme_page_pa(tpage);
	}

	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
	if (ret)
		goto e_free;

	if (tpage) {
		offset = paddr & 15;
		if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
				 page_address(tpage) + offset, size))
			ret = -EFAULT;
	}

e_free:
	if (tpage)
		__free_page(tpage);

	return ret;
}

static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
				  unsigned long __user vaddr,
				  unsigned long dst_paddr,
				  unsigned long __user dst_vaddr,
				  int size, int *error)
{
	struct page *src_tpage = NULL;
	struct page *dst_tpage = NULL;
	int ret, len = size;

	/* If the source buffer is not aligned then use an intermediate buffer */
	if (!IS_ALIGNED(vaddr, 16)) {
		src_tpage = alloc_page(GFP_KERNEL);
		if (!src_tpage)
			return -ENOMEM;

		if (copy_from_user(page_address(src_tpage),
				   (void __user *)(uintptr_t)vaddr, size)) {
			__free_page(src_tpage);
			return -EFAULT;
		}

		paddr = __sme_page_pa(src_tpage);
	}

	/*
	 * If the destination buffer or length is not aligned then do a
	 * read-modify-write:
	 * - decrypt the destination into an intermediate buffer
	 * - copy the source buffer into the intermediate buffer
	 * - use the intermediate buffer as the source buffer
	 */
	if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
		int dst_offset;

		dst_tpage = alloc_page(GFP_KERNEL);
		if (!dst_tpage) {
			ret = -ENOMEM;
			goto e_free;
		}

		ret = __sev_dbg_decrypt(kvm, dst_paddr,
					__sme_page_pa(dst_tpage), size, error);
		if (ret)
			goto e_free;

		/*
		 * If the source is a kernel buffer then use memcpy(),
		 * otherwise copy_from_user().
		 */
		dst_offset = dst_paddr & 15;

		if (src_tpage)
			memcpy(page_address(dst_tpage) + dst_offset,
			       page_address(src_tpage), size);
		else {
			if (copy_from_user(page_address(dst_tpage) + dst_offset,
					   (void __user *)(uintptr_t)vaddr, size)) {
				ret = -EFAULT;
				goto e_free;
			}
		}

		paddr = __sme_page_pa(dst_tpage);
		dst_paddr = round_down(dst_paddr, 16);
		len = round_up(size, 16);
	}

	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
	if (src_tpage)
		__free_page(src_tpage);
	if (dst_tpage)
		__free_page(dst_tpage);
	return ret;
}

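/*
 * KVM_SEV_DBG_{DECRYPT,ENCRYPT}: copy guest memory through the firmware
 * debug interface one page at a time, pinning the source and destination
 * userspace pages for each iteration.
 */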
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
	unsigned long vaddr, vaddr_end, next_vaddr;
	unsigned long dst_vaddr;
	struct page **src_p, **dst_p;
	struct kvm_sev_dbg debug;
	unsigned long n;
	unsigned int size;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
		return -EFAULT;

	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
		return -EINVAL;
	if (!debug.dst_uaddr)
		return -EINVAL;

	vaddr = debug.src_uaddr;
	size = debug.len;
	vaddr_end = vaddr + size;
	dst_vaddr = debug.dst_uaddr;

	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
		int len, s_off, d_off;

		/* lock userspace source and destination page */
		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
		if (!src_p)
			return -EFAULT;

		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
		if (!dst_p) {
			sev_unpin_memory(kvm, src_p, n);
			return -EFAULT;
		}

		/*
		 * The DBG_{DE,EN}CRYPT commands will perform {de,en}cryption of the
		 * memory content (i.e. it will write the same memory region with C=1).
		 * It's possible that the cache may contain the data with C=0, i.e.,
		 * unencrypted, so invalidate it first.
		 */
		sev_clflush_pages(src_p, 1);
		sev_clflush_pages(dst_p, 1);

		/*
		 * Since the user buffer may not be page aligned, calculate the
		 * offset within the page.
		 */
		s_off = vaddr & ~PAGE_MASK;
		d_off = dst_vaddr & ~PAGE_MASK;
		len = min_t(size_t, (PAGE_SIZE - s_off), size);

		if (dec)
			ret = __sev_dbg_decrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     dst_vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     len, &argp->error);
		else
			ret = __sev_dbg_encrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     dst_vaddr,
						     len, &argp->error);

		sev_unpin_memory(kvm, src_p, n);
		sev_unpin_memory(kvm, dst_p, n);

		if (ret)
			goto err;

		next_vaddr = vaddr + len;
		dst_vaddr = dst_vaddr + len;
		size -= len;
	}
err:
	return ret;
}

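/*
 * KVM_SEV_LAUNCH_UPDATE_SECRET: pin the target guest pages, verify they
 * are physically contiguous and pass the packaged secret (header and
 * transport blob) to the firmware for injection into guest memory.
 */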
static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_secret *data;
	struct kvm_sev_launch_secret params;
	struct page **pages;
	void *blob, *hdr;
	unsigned long n;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
	if (!pages)
		return -ENOMEM;

	/*
	 * The secret must be copied into a contiguous memory region, so
	 * verify that the userspace memory pages are contiguous before
	 * issuing the command.
	 */
	if (get_num_contig_pages(0, pages, n) != n) {
		ret = -EINVAL;
		goto e_unpin_memory;
	}

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto e_unpin_memory;

	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	data->guest_address = __sme_page_pa(pages[0]) + offset;
	data->guest_len = params.guest_len;

	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		goto e_free;
	}

	data->trans_address = __psp_pa(blob);
	data->trans_len = params.trans_len;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr)) {
		ret = PTR_ERR(hdr);
		goto e_free_blob;
	}
	data->hdr_address = __psp_pa(hdr);
	data->hdr_len = params.hdr_len;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);

	kfree(hdr);

e_free_blob:
	kfree(blob);
e_free:
	kfree(data);
e_unpin_memory:
	sev_unpin_memory(kvm, pages, n);
	return ret;
}

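/*
 * Entry point for the KVM_MEMORY_ENCRYPT_OP ioctl: dispatch the requested
 * SEV command and copy the (possibly updated) command structure back to
 * userspace.
 */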
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
	struct kvm_sev_cmd sev_cmd;
	int r;

	if (!svm_sev_enabled())
		return -ENOTTY;

	if (!argp)
		return 0;

	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
		return -EFAULT;

	mutex_lock(&kvm->lock);

	switch (sev_cmd.id) {
	case KVM_SEV_INIT:
		r = sev_guest_init(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_START:
		r = sev_launch_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_DATA:
		r = sev_launch_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_MEASURE:
		r = sev_launch_measure(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_FINISH:
		r = sev_launch_finish(kvm, &sev_cmd);
		break;
	case KVM_SEV_GUEST_STATUS:
		r = sev_guest_status(kvm, &sev_cmd);
		break;
	case KVM_SEV_DBG_DECRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, true);
		break;
	case KVM_SEV_DBG_ENCRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, false);
		break;
	case KVM_SEV_LAUNCH_SECRET:
		r = sev_launch_secret(kvm, &sev_cmd);
		break;
	default:
		r = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
		r = -EFAULT;

out:
	mutex_unlock(&kvm->lock);
	return r;
}

int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct enc_region *region;
	int ret = 0;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
	if (!region)
		return -ENOMEM;

	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
	if (!region->pages) {
		ret = -ENOMEM;
		goto e_free;
	}

	/*
	 * The guest may change the memory encryption attribute from C=0 -> C=1
	 * or vice versa for this memory range. Make sure caches are flushed so
	 * that guest data gets written into memory with the correct C-bit.
	 */
	sev_clflush_pages(region->pages, region->npages);

	region->uaddr = range->addr;
	region->size = range->size;

	mutex_lock(&kvm->lock);
	list_add_tail(&region->list, &sev->regions_list);
	mutex_unlock(&kvm->lock);

	return ret;

e_free:
	kfree(region);
	return ret;
}

static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct enc_region *i;

	list_for_each_entry(i, head, list) {
		if (i->uaddr == range->addr &&
		    i->size == range->size)
			return i;
	}

	return NULL;
}

static void __unregister_enc_region_locked(struct kvm *kvm,
					   struct enc_region *region)
{
	sev_unpin_memory(kvm, region->pages, region->npages);
	list_del(&region->list);
	kfree(region);
}

int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range)
{
	struct enc_region *region;
	int ret;

	mutex_lock(&kvm->lock);

	if (!sev_guest(kvm)) {
		ret = -ENOTTY;
		goto failed;
	}

	region = find_enc_region(kvm, range);
	if (!region) {
		ret = -EINVAL;
		goto failed;
	}

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	__unregister_enc_region_locked(kvm, region);

	mutex_unlock(&kvm->lock);
	return 0;

failed:
	mutex_unlock(&kvm->lock);
	return ret;
}

void sev_vm_destroy(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct list_head *pos, *q;

	if (!sev_guest(kvm))
		return;

	mutex_lock(&kvm->lock);

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	/*
	 * If userspace was terminated before unregistering the memory regions
	 * then unpin all the registered memory.
	 */
	if (!list_empty(head)) {
		list_for_each_safe(pos, q, head) {
			__unregister_enc_region_locked(kvm,
						       list_entry(pos, struct enc_region, list));
		}
	}

	mutex_unlock(&kvm->lock);

	sev_unbind_asid(kvm, sev->handle);
	sev_asid_free(sev->asid);
}

int __init sev_hardware_setup(void)
{
	struct sev_user_data_status *status;
	int rc;

	/* Maximum number of encrypted guests supported simultaneously */
	max_sev_asid = cpuid_ecx(0x8000001F);

	if (!svm_sev_enabled())
		return 1;

	/* Minimum ASID value that should be used for SEV guest */
	min_sev_asid = cpuid_edx(0x8000001F);

	/* Initialize SEV ASID bitmaps */
	sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
	if (!sev_asid_bitmap)
		return 1;

	sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
	if (!sev_reclaim_asid_bitmap)
		return 1;

	status = kmalloc(sizeof(*status), GFP_KERNEL);
	if (!status)
		return 1;

	/*
	 * Check SEV platform status.
	 *
	 * PLATFORM_STATUS can be called in any state. If the query fails,
	 * then either the PSP firmware does not support the SEV feature or
	 * the SEV firmware is dead.
	 */
	rc = sev_platform_status(status, NULL);
	if (rc)
		goto err;

	pr_info("SEV supported\n");

err:
	kfree(status);
	return rc;
}

void sev_hardware_teardown(void)
{
	if (!svm_sev_enabled())
		return;

	bitmap_free(sev_asid_bitmap);
	bitmap_free(sev_reclaim_asid_bitmap);

	sev_flush_asids();
}

void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	int asid = sev_get_asid(svm->vcpu.kvm);

	/* Assign the asid allocated with this SEV guest */
	svm->vmcb->control.asid = asid;

	/*
	 * Flush guest TLB:
	 *
	 * 1) when a different VMCB for the same ASID is to be run on the same
	 *    host CPU, or
	 * 2) this VMCB was executed on a different host CPU in previous VMRUNs.
	 */
	if (sd->sev_vmcbs[asid] == svm->vmcb &&
	    svm->last_cpu == cpu)
		return;

	svm->last_cpu = cpu;
	sd->sev_vmcbs[asid] = svm->vmcb;
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	mark_dirty(svm->vmcb, VMCB_ASID);
}