// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/misc_cgroup.h>
#include <linux/processor.h>
#include <linux/trace_events.h>
#include <asm/fpu/internal.h>

#include <asm/pkru.h>
#include <asm/trapnr.h>

#include "x86.h"
#include "svm.h"
#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"

#ifndef CONFIG_KVM_AMD_SEV
/*
 * When this config is not defined, the SEV feature is not supported and the
 * APIs in this file are not used, but the file still gets compiled into the
 * kvm-amd module.
 *
 * In that case there are no MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in
 * enum misc_res_type defined in linux/misc_cgroup.h.
 *
 * The macros below allow compilation to succeed.
 */
#define MISC_CG_RES_SEV		MISC_CG_RES_TYPES
#define MISC_CG_RES_SEV_ES	MISC_CG_RES_TYPES
#endif

#ifdef CONFIG_KVM_AMD_SEV
/* enable/disable SEV support */
static bool sev_enabled = true;
module_param_named(sev, sev_enabled, bool, 0444);

/* enable/disable SEV-ES support */
static bool sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);
#else
#define sev_enabled false
#define sev_es_enabled false
#endif /* CONFIG_KVM_AMD_SEV */

static u8 sev_enc_bit;
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long sev_me_mask;
static unsigned int nr_asids;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;

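/*
 * Tracks a range of guest memory that has been pinned for a SEV guest;
 * entries live on the per-VM regions_list initialized in sev_guest_init().
 */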
struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

/* Called with the sev_bitmap_lock held, or on shutdown */
static int sev_flush_asids(int min_asid, int max_asid)
{
	int ret, asid, error = 0;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
	if (asid > max_asid)
		return -EBUSY;

	/*
	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
	 * so it must be guarded.
	 */
	down_write(&sev_deactivate_lock);

	wbinvd_on_all_cpus();
	ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return ret;
}

static inline bool is_mirroring_enc_context(struct kvm *kvm)
{
	return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(int min_asid, int max_asid)
{
	if (sev_flush_asids(min_asid, max_asid))
		return false;

	/* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
		   nr_asids);
	bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);

	return true;
}

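/*
 * Allocate an ASID for the guest, charging it against the SEV or SEV-ES misc
 * cgroup resource first.  SEV-ES guests draw from [1, min_sev_asid - 1] and
 * plain SEV guests from [min_sev_asid, max_sev_asid]; if the range is
 * exhausted, reclaimed ASIDs are recycled once before giving up.
 */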
static int sev_asid_new(struct kvm_sev_info *sev)
{
	int asid, min_asid, max_asid, ret;
	bool retry = true;
	enum misc_res_type type;

	type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
	WARN_ON(sev->misc_cg);
	sev->misc_cg = get_current_misc_cg();
	ret = misc_cg_try_charge(type, sev->misc_cg, 1);
	if (ret) {
		put_misc_cg(sev->misc_cg);
		sev->misc_cg = NULL;
		return ret;
	}

	mutex_lock(&sev_bitmap_lock);

	/*
	 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
	 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
	 */
	min_asid = sev->es_active ? 1 : min_sev_asid;
	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
again:
	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
	if (asid > max_asid) {
		if (retry && __sev_recycle_asids(min_asid, max_asid)) {
			retry = false;
			goto again;
		}
		mutex_unlock(&sev_bitmap_lock);
		ret = -EBUSY;
		goto e_uncharge;
	}

	__set_bit(asid, sev_asid_bitmap);

	mutex_unlock(&sev_bitmap_lock);

	return asid;
e_uncharge:
	misc_cg_uncharge(type, sev->misc_cg, 1);
	put_misc_cg(sev->misc_cg);
	sev->misc_cg = NULL;
	return ret;
}

static int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->asid;
}

static void sev_asid_free(struct kvm_sev_info *sev)
{
	struct svm_cpu_data *sd;
	int cpu;
	enum misc_res_type type;

	mutex_lock(&sev_bitmap_lock);

	__set_bit(sev->asid, sev_reclaim_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu(svm_data, cpu);
		sd->sev_vmcbs[sev->asid] = NULL;
	}

	mutex_unlock(&sev_bitmap_lock);

	type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
	misc_cg_uncharge(type, sev->misc_cg, 1);
	put_misc_cg(sev->misc_cg);
	sev->misc_cg = NULL;
}

static void sev_decommission(unsigned int handle)
{
	struct sev_data_decommission decommission;

	if (!handle)
		return;

	decommission.handle = handle;
	sev_guest_decommission(&decommission, NULL);
}

static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_deactivate deactivate;

	if (!handle)
		return;

	deactivate.handle = handle;

	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(&deactivate, NULL);
	up_read(&sev_deactivate_lock);

	sev_decommission(handle);
}

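/*
 * Handle KVM_SEV_INIT / KVM_SEV_ES_INIT: allocate an ASID and initialize the
 * SEV platform.  Userspace reaches this through the VM ioctl, roughly (an
 * illustrative sketch, not part of this file):
 *
 *	struct kvm_sev_cmd cmd = { .id = KVM_SEV_INIT, .sev_fd = sev_fd };
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 */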
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	int asid, ret;

	if (kvm->created_vcpus)
		return -EINVAL;

	ret = -EBUSY;
	if (unlikely(sev->active))
		return ret;

	sev->active = true;
	sev->es_active = argp->id == KVM_SEV_ES_INIT;
	asid = sev_asid_new(sev);
	if (asid < 0)
		goto e_no_asid;
	sev->asid = asid;

	ret = sev_platform_init(&argp->error);
	if (ret)
		goto e_free;

	INIT_LIST_HEAD(&sev->regions_list);

	return 0;

e_free:
	sev_asid_free(sev);
	sev->asid = 0;
e_no_asid:
	sev->es_active = false;
	sev->active = false;
	return ret;
}

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
	struct sev_data_activate activate;
	int asid = sev_get_asid(kvm);
	int ret;

	/* activate ASID on the given handle */
	activate.handle = handle;
	activate.asid = asid;
	ret = sev_guest_activate(&activate, error);

	return ret;
}

static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = sev_issue_cmd_external_user(f.file, id, data, error);

	fdput(f);
	return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return __sev_issue_cmd(sev->fd, id, data, error);
}

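/*
 * KVM_SEV_LAUNCH_START: ask the firmware to create the guest's memory
 * encryption context and bind the guest's ASID to the returned handle.  This
 * is the first step of the launch flow (LAUNCH_START, LAUNCH_UPDATE_DATA,
 * LAUNCH_MEASURE, optional LAUNCH_SECRET, LAUNCH_FINISH).
 */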
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_start start;
	struct kvm_sev_launch_start params;
	void *dh_blob, *session_blob;
	int *error = &argp->error;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	memset(&start, 0, sizeof(start));

	dh_blob = NULL;
	if (params.dh_uaddr) {
		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
		if (IS_ERR(dh_blob))
			return PTR_ERR(dh_blob);

		start.dh_cert_address = __sme_set(__pa(dh_blob));
		start.dh_cert_len = params.dh_len;
	}

	session_blob = NULL;
	if (params.session_uaddr) {
		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
		if (IS_ERR(session_blob)) {
			ret = PTR_ERR(session_blob);
			goto e_free_dh;
		}

		start.session_address = __sme_set(__pa(session_blob));
		start.session_len = params.session_len;
	}

	start.handle = params.handle;
	start.policy = params.policy;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start.handle, error);
	if (ret) {
		sev_decommission(start.handle);
		goto e_free_session;
	}

	/* return handle to userspace */
	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
		sev_unbind_asid(kvm, start.handle);
		ret = -EFAULT;
		goto e_free_session;
	}

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_blob);
e_free_dh:
	kfree(dh_blob);
	return ret;
}

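/*
 * Pin a range of user memory so the PSP can operate on it.  The pinned page
 * count is tracked in sev->pages_locked and checked against RLIMIT_MEMLOCK.
 */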
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	unsigned long npages, size;
	int npinned;
	unsigned long locked, lock_limit;
	struct page **pages;
	unsigned long first, last;
	int ret;

	lockdep_assert_held(&kvm->lock);

	if (ulen == 0 || uaddr + ulen < uaddr)
		return ERR_PTR(-EINVAL);

	/* Calculate number of pages. */
	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return ERR_PTR(-ENOMEM);
	}

	if (WARN_ON_ONCE(npages > INT_MAX))
		return ERR_PTR(-EINVAL);

	/* Avoid using vmalloc for smaller buffers. */
	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	else
		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Pin the user virtual address. */
	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);
		ret = -ENOMEM;
		goto err;
	}

	*n = npages;
	sev->pages_locked = locked;

	return pages;

err:
	if (npinned > 0)
		unpin_user_pages(pages, npinned);

	kvfree(pages);
	return ERR_PTR(ret);
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	unpin_user_pages(pages, npages);
	kvfree(pages);
	sev->pages_locked -= npages;
}

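/*
 * Flush guest pages from the CPU caches on parts without SME_COHERENT; the
 * cache may still hold data written through an unencrypted mapping.
 */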
static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
	uint8_t *page_virtual;
	unsigned long i;

	if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
	    pages == NULL)
		return;

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_atomic(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
}

static unsigned long get_num_contig_pages(unsigned long idx,
					  struct page **inpages, unsigned long npages)
{
	unsigned long paddr, next_paddr;
	unsigned long i = idx + 1, pages = 1;

	/* find the number of contiguous pages starting from idx */
	paddr = __sme_page_pa(inpages[idx]);
	while (i < npages) {
		next_paddr = __sme_page_pa(inpages[i++]);
		if ((paddr + PAGE_SIZE) == next_paddr) {
			pages++;
			paddr = next_paddr;
			continue;
		}
		break;
	}

	return pages;
}

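/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: encrypt the given range of guest memory in
 * place, issuing one firmware command per run of physically contiguous pages.
 */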
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_launch_update_data params;
	struct sev_data_launch_update_data data;
	struct page **inpages;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	vaddr = params.uaddr;
	size = params.len;
	vaddr_end = vaddr + size;

	/* Lock the user memory. */
	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
	if (IS_ERR(inpages))
		return PTR_ERR(inpages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(inpages, npages);

	data.reserved = 0;
	data.handle = sev->handle;

	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
		int offset, len;

		/*
		 * If the user buffer is not page-aligned, calculate the offset
		 * within the page.
		 */
		offset = vaddr & (PAGE_SIZE - 1);

		/* Calculate the number of pages that can be encrypted in one go. */
		pages = get_num_contig_pages(i, inpages, npages);

		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

		data.len = len;
		data.address = __sme_page_pa(inpages[i]) + offset;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
		if (ret)
			goto e_unpin;

		size -= len;
		next_vaddr = vaddr + len;
	}

e_unpin:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(inpages[i]);
		mark_page_accessed(inpages[i]);
	}
	/* unlock the user pages */
	sev_unpin_memory(kvm, inpages, npages);
	return ret;
}

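/*
 * Copy the vCPU's current register state into the unencrypted VMSA image in
 * preparation for LAUNCH_UPDATE_VMSA.
 */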
static int sev_es_sync_vmsa(struct vcpu_svm *svm)
{
	struct vmcb_save_area *save = &svm->vmcb->save;

	/* Check some debug related fields before encrypting the VMSA */
	if (svm->vcpu.guest_debug || (save->dr7 & ~DR7_FIXED_1))
		return -EINVAL;

	/* Sync registers */
	save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
	save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
	save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
	save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
	save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
	save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
	save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
#ifdef CONFIG_X86_64
	save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8];
	save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9];
	save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
	save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
	save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
	save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
	save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
	save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
#endif
	save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];

	/* Sync some non-GPR registers before encrypting */
	save->xcr0 = svm->vcpu.arch.xcr0;
	save->pkru = svm->vcpu.arch.pkru;
	save->xss = svm->vcpu.arch.ia32_xss;
	save->dr6 = svm->vcpu.arch.dr6;

	/*
	 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
	 * the traditional VMSA that is part of the VMCB. Copy the
	 * traditional VMSA as it has been built so far (in prep
	 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
	 */
	memcpy(svm->vmsa, save, sizeof(*save));

	return 0;
}

static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
				    int *error)
{
	struct sev_data_launch_update_vmsa vmsa;
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	/* Perform some pre-encryption checks against the VMSA */
	ret = sev_es_sync_vmsa(svm);
	if (ret)
		return ret;

	/*
	 * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
	 * the VMSA memory content (i.e. it will write the same memory region
	 * with the guest's key), so invalidate it first.
	 */
	clflush_cache_range(svm->vmsa, PAGE_SIZE);

	vmsa.reserved = 0;
	vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
	vmsa.address = __sme_pa(svm->vmsa);
	vmsa.len = PAGE_SIZE;
	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
}

static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_vcpu *vcpu;
	int i, ret;

	if (!sev_es_guest(kvm))
		return -ENOTTY;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		ret = mutex_lock_killable(&vcpu->mutex);
		if (ret)
			return ret;

		ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);

		mutex_unlock(&vcpu->mutex);
		if (ret)
			return ret;
	}

	return 0;
}

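/*
 * KVM_SEV_LAUNCH_MEASURE: retrieve the launch measurement from the firmware.
 * A zero params.len queries the required blob length instead.
 */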
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *measure = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_measure data;
	struct kvm_sev_launch_measure params;
	void __user *p = NULL;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, measure, sizeof(params)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE)
			return -EINVAL;

		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			return -ENOMEM;

		data.address = __psp_pa(blob);
		data.len = params.len;
	}

cmd:
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);

	/*
	 * If userspace only queried the blob length, the FW has responded
	 * with the expected data.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data.len;
	if (copy_to_user(measure, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
	return ret;
}

static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
}

static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_guest_status params;
	struct sev_data_guest_status data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	memset(&data, 0, sizeof(data));

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
	if (ret)
		return ret;

	params.policy = data.policy;
	params.state = data.state;
	params.handle = data.handle;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
		ret = -EFAULT;

	return ret;
}

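/* Issue a single DBG_ENCRYPT or DBG_DECRYPT command to the firmware. */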
static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
			       unsigned long dst, int size,
			       int *error, bool enc)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_dbg data;

	data.reserved = 0;
	data.handle = sev->handle;
	data.dst_addr = dst;
	data.src_addr = src;
	data.len = size;

	return sev_issue_cmd(kvm,
			     enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
			     &data, error);
}

static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
			     unsigned long dst_paddr, int sz, int *err)
{
	int offset;

	/*
	 * It's safe to read more than we are asked; the caller should ensure
	 * that the destination has enough space.
	 */
	offset = src_paddr & 15;
	src_paddr = round_down(src_paddr, 16);
	sz = round_up(sz + offset, 16);

	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}

static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
				  void __user *dst_uaddr,
				  unsigned long dst_paddr,
				  int size, int *err)
{
	struct page *tpage = NULL;
	int ret, offset;

	/* if inputs are not 16-byte aligned then use an intermediate buffer */
	if (!IS_ALIGNED(dst_paddr, 16) ||
	    !IS_ALIGNED(paddr, 16) ||
	    !IS_ALIGNED(size, 16)) {
		tpage = (void *)alloc_page(GFP_KERNEL);
		if (!tpage)
			return -ENOMEM;

		dst_paddr = __sme_page_pa(tpage);
	}

	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
	if (ret)
		goto e_free;

	if (tpage) {
		offset = paddr & 15;
		if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
			ret = -EFAULT;
	}

e_free:
	if (tpage)
		__free_page(tpage);

	return ret;
}

static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
				  void __user *vaddr,
				  unsigned long dst_paddr,
				  void __user *dst_vaddr,
				  int size, int *error)
{
	struct page *src_tpage = NULL;
	struct page *dst_tpage = NULL;
	int ret, len = size;

	/* If the source buffer is not aligned then use an intermediate buffer */
	if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
		src_tpage = alloc_page(GFP_KERNEL);
		if (!src_tpage)
			return -ENOMEM;

		if (copy_from_user(page_address(src_tpage), vaddr, size)) {
			__free_page(src_tpage);
			return -EFAULT;
		}

		paddr = __sme_page_pa(src_tpage);
	}

	/*
	 * If the destination buffer or the length is not aligned then do a
	 * read-modify-write:
	 * - decrypt the destination into an intermediate buffer
	 * - copy the source buffer into an intermediate buffer
	 * - use the intermediate buffer as the source buffer
	 */
	if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
		int dst_offset;

		dst_tpage = alloc_page(GFP_KERNEL);
		if (!dst_tpage) {
			ret = -ENOMEM;
			goto e_free;
		}

		ret = __sev_dbg_decrypt(kvm, dst_paddr,
					__sme_page_pa(dst_tpage), size, error);
		if (ret)
			goto e_free;

		/*
		 * If the source is a kernel buffer then use memcpy(),
		 * otherwise copy_from_user().
		 */
		dst_offset = dst_paddr & 15;

		if (src_tpage)
			memcpy(page_address(dst_tpage) + dst_offset,
			       page_address(src_tpage), size);
		else {
			if (copy_from_user(page_address(dst_tpage) + dst_offset,
					   vaddr, size)) {
				ret = -EFAULT;
				goto e_free;
			}
		}

		paddr = __sme_page_pa(dst_tpage);
		dst_paddr = round_down(dst_paddr, 16);
		len = round_up(size, 16);
	}

	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
	if (src_tpage)
		__free_page(src_tpage);
	if (dst_tpage)
		__free_page(dst_tpage);
	return ret;
}

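/*
 * Common handler for KVM_SEV_DBG_DECRYPT and KVM_SEV_DBG_ENCRYPT: walk the
 * source range one page at a time, pinning the source and destination pages
 * and bouncing through intermediate buffers when alignment requires it.
 */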
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
	unsigned long vaddr, vaddr_end, next_vaddr;
	unsigned long dst_vaddr;
	struct page **src_p, **dst_p;
	struct kvm_sev_dbg debug;
	unsigned long n;
	unsigned int size;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
		return -EFAULT;

	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
		return -EINVAL;
	if (!debug.dst_uaddr)
		return -EINVAL;

	vaddr = debug.src_uaddr;
	size = debug.len;
	vaddr_end = vaddr + size;
	dst_vaddr = debug.dst_uaddr;

	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
		int len, s_off, d_off;

		/* lock userspace source and destination page */
		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
		if (IS_ERR(src_p))
			return PTR_ERR(src_p);

		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
		if (IS_ERR(dst_p)) {
			sev_unpin_memory(kvm, src_p, n);
			return PTR_ERR(dst_p);
		}

		/*
		 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
		 * the pages; flush the destination too so that future accesses do not
		 * see stale data.
		 */
		sev_clflush_pages(src_p, 1);
		sev_clflush_pages(dst_p, 1);

		/*
		 * Since the user buffer may not be page-aligned, calculate the
		 * offset within the page.
		 */
		s_off = vaddr & ~PAGE_MASK;
		d_off = dst_vaddr & ~PAGE_MASK;
		len = min_t(size_t, (PAGE_SIZE - s_off), size);

		if (dec)
			ret = __sev_dbg_decrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     (void __user *)dst_vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     len, &argp->error);
		else
			ret = __sev_dbg_encrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     (void __user *)vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     (void __user *)dst_vaddr,
						     len, &argp->error);

		sev_unpin_memory(kvm, src_p, n);
		sev_unpin_memory(kvm, dst_p, n);

		if (ret)
			goto err;

		next_vaddr = vaddr + len;
		dst_vaddr = dst_vaddr + len;
		size -= len;
	}
err:
	return ret;
}

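/*
 * KVM_SEV_LAUNCH_SECRET: inject a secret provided by the guest owner into
 * guest memory after the launch measurement has been retrieved.
 */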
static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_secret data;
	struct kvm_sev_launch_secret params;
	struct page **pages;
	void *blob, *hdr;
	unsigned long n, i;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(pages, n);

	/*
	 * The secret must be copied into a contiguous memory region, so verify
	 * that the userspace memory pages are contiguous before issuing the
	 * command.
	 */
	if (get_num_contig_pages(0, pages, n) != n) {
		ret = -EINVAL;
		goto e_unpin_memory;
	}

	memset(&data, 0, sizeof(data));

	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	data.guest_address = __sme_page_pa(pages[0]) + offset;
	data.guest_len = params.guest_len;

	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		goto e_unpin_memory;
	}

	data.trans_address = __psp_pa(blob);
	data.trans_len = params.trans_len;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr)) {
		ret = PTR_ERR(hdr);
		goto e_free_blob;
	}
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);

	kfree(hdr);

e_free_blob:
	kfree(blob);
e_unpin_memory:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < n; i++) {
		set_page_dirty_lock(pages[i]);
		mark_page_accessed(pages[i]);
	}
	sev_unpin_memory(kvm, pages, n);
	return ret;
}

static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *report = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_attestation_report data;
	struct kvm_sev_attestation_report params;
	void __user *p;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE)
			return -EINVAL;

		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			return -ENOMEM;

		data.address = __psp_pa(blob);
		data.len = params.len;
		memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
	}
cmd:
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
	/*
	 * If userspace only queried the blob length, the FW has responded
	 * with the expected data.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data.len;
	if (copy_to_user(report, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
	return ret;
}

/* Userspace wants to query session length. */
static int
__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
				      struct kvm_sev_send_start *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start data;
	int ret;

	memset(&data, 0, sizeof(data));
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

	params->session_len = data.session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

	return ret;
}

static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start data;
	struct kvm_sev_send_start params;
	void *amd_certs, *session_data;
	void *pdh_cert, *plat_certs;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_start)))
		return -EFAULT;

	/* if session_len is zero, userspace wants to query the session length */
	if (!params.session_len)
		return __sev_send_start_query_session_length(kvm, argp,
							     &params);

	/* some sanity checks */
	if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
	    !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
		return -EINVAL;

	/* allocate the memory to hold the session data blob */
	session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
	if (!session_data)
		return -ENOMEM;

	/* copy the certificate blobs from userspace */
	pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
				      params.pdh_cert_len);
	if (IS_ERR(pdh_cert)) {
		ret = PTR_ERR(pdh_cert);
		goto e_free_session;
	}

	plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
					params.plat_certs_len);
	if (IS_ERR(plat_certs)) {
		ret = PTR_ERR(plat_certs);
		goto e_free_pdh;
	}

	amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
				       params.amd_certs_len);
	if (IS_ERR(amd_certs)) {
		ret = PTR_ERR(amd_certs);
		goto e_free_plat_cert;
	}

	/* populate the FW SEND_START field with system physical address */
	memset(&data, 0, sizeof(data));
	data.pdh_cert_address = __psp_pa(pdh_cert);
	data.pdh_cert_len = params.pdh_cert_len;
	data.plat_certs_address = __psp_pa(plat_certs);
	data.plat_certs_len = params.plat_certs_len;
	data.amd_certs_address = __psp_pa(amd_certs);
	data.amd_certs_len = params.amd_certs_len;
	data.session_address = __psp_pa(session_data);
	data.session_len = params.session_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

	if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
				 session_data, params.session_len)) {
		ret = -EFAULT;
		goto e_free_amd_cert;
	}

	params.policy = data.policy;
	params.session_len = data.session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
			 sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

e_free_amd_cert:
	kfree(amd_certs);
e_free_plat_cert:
	kfree(plat_certs);
e_free_pdh:
	kfree(pdh_cert);
e_free_session:
	kfree(session_data);
	return ret;
}

/* Userspace wants to query either header or trans length. */
static int
__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
				     struct kvm_sev_send_update_data *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_update_data data;
	int ret;

	memset(&data, 0, sizeof(data));
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);

	params->hdr_len = data.hdr_len;
	params->trans_len = data.trans_len;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_update_data)))
		ret = -EFAULT;

	return ret;
}

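/*
 * KVM_SEV_SEND_UPDATE_DATA: re-encrypt a page of guest memory with the
 * migration transport key and copy the resulting packet header and transport
 * buffer out to userspace.
 */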
static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_update_data data;
	struct kvm_sev_send_update_data params;
	void *hdr, *trans_data;
	struct page **guest_page;
	unsigned long n;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_update_data)))
		return -EFAULT;

	/* userspace wants to query either header or trans length */
	if (!params.trans_len || !params.hdr_len)
		return __sev_send_update_data_query_lengths(kvm, argp, &params);

	if (!params.trans_uaddr || !params.guest_uaddr ||
	    !params.guest_len || !params.hdr_uaddr)
		return -EINVAL;

	/* Check if we are crossing the page boundary */
	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	if ((params.guest_len + offset > PAGE_SIZE))
		return -EINVAL;

	/* Pin guest memory */
	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
				    PAGE_SIZE, &n, 0);
	if (IS_ERR(guest_page))
		return PTR_ERR(guest_page);

	/* allocate memory for header and transport buffer */
	ret = -ENOMEM;
	hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
	if (!hdr)
		goto e_unpin;

	trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
	if (!trans_data)
		goto e_free_hdr;

	memset(&data, 0, sizeof(data));
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;
	data.trans_address = __psp_pa(trans_data);
	data.trans_len = params.trans_len;

	/* The SEND_UPDATE_DATA command requires C-bit to be always set. */
	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
	data.guest_address |= sev_me_mask;
	data.guest_len = params.guest_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);

	if (ret)
		goto e_free_trans_data;

	/* copy transport buffer to user space */
	if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
			 trans_data, params.trans_len)) {
		ret = -EFAULT;
		goto e_free_trans_data;
	}

	/* Copy packet header to userspace. */
	if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
			 params.hdr_len))
		ret = -EFAULT;

e_free_trans_data:
	kfree(trans_data);
e_free_hdr:
	kfree(hdr);
e_unpin:
	sev_unpin_memory(kvm, guest_page, n);

	return ret;
}

static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
}

static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_cancel data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
}

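/*
 * KVM_SEV_RECEIVE_START: create the firmware context for an incoming migrated
 * guest and bind the guest's ASID to the returned handle.
 */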
static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_receive_start start;
	struct kvm_sev_receive_start params;
	int *error = &argp->error;
	void *session_data;
	void *pdh_data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	/* Get parameter from the userspace */
	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_receive_start)))
		return -EFAULT;

	/* some sanity checks */
	if (!params.pdh_uaddr || !params.pdh_len ||
	    !params.session_uaddr || !params.session_len)
		return -EINVAL;

	pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
	if (IS_ERR(pdh_data))
		return PTR_ERR(pdh_data);

	session_data = psp_copy_user_blob(params.session_uaddr,
					  params.session_len);
	if (IS_ERR(session_data)) {
		ret = PTR_ERR(session_data);
		goto e_free_pdh;
	}

	memset(&start, 0, sizeof(start));
	start.handle = params.handle;
	start.policy = params.policy;
	start.pdh_cert_address = __psp_pa(pdh_data);
	start.pdh_cert_len = params.pdh_len;
	start.session_address = __psp_pa(session_data);
	start.session_len = params.session_len;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
			      error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start.handle, error);
	if (ret) {
		sev_decommission(start.handle);
		goto e_free_session;
	}

	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data,
			 &params, sizeof(struct kvm_sev_receive_start))) {
		ret = -EFAULT;
		sev_unbind_asid(kvm, start.handle);
		goto e_free_session;
	}

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_data);
e_free_pdh:
	kfree(pdh_data);

	return ret;
}

Brijesh Singh15fb7de2021-04-15 15:55:17 +00001430static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1431{
1432 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1433 struct kvm_sev_receive_update_data params;
Sean Christopherson238eca82021-04-06 15:49:52 -07001434 struct sev_data_receive_update_data data;
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001435 void *hdr = NULL, *trans = NULL;
1436 struct page **guest_page;
1437 unsigned long n;
1438 int ret, offset;
1439
1440 if (!sev_guest(kvm))
1441 return -EINVAL;
1442
1443 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
1444 sizeof(struct kvm_sev_receive_update_data)))
1445 return -EFAULT;
1446
1447 if (!params.hdr_uaddr || !params.hdr_len ||
1448 !params.guest_uaddr || !params.guest_len ||
1449 !params.trans_uaddr || !params.trans_len)
1450 return -EINVAL;
1451
1452 /* Check if we are crossing the page boundary */
1453 offset = params.guest_uaddr & (PAGE_SIZE - 1);
1454 if ((params.guest_len + offset > PAGE_SIZE))
1455 return -EINVAL;
1456
1457 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1458 if (IS_ERR(hdr))
1459 return PTR_ERR(hdr);
1460
1461 trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1462 if (IS_ERR(trans)) {
1463 ret = PTR_ERR(trans);
1464 goto e_free_hdr;
1465 }
1466
Sean Christopherson238eca82021-04-06 15:49:52 -07001467 memset(&data, 0, sizeof(data));
1468 data.hdr_address = __psp_pa(hdr);
1469 data.hdr_len = params.hdr_len;
1470 data.trans_address = __psp_pa(trans);
1471 data.trans_len = params.trans_len;
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001472
1473 /* Pin guest memory */
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001474 guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
Sean Christopherson50c03802021-09-14 14:09:50 -07001475 PAGE_SIZE, &n, 1);
Sean Christophersonc7a1b2b2021-05-06 10:58:26 -07001476 if (IS_ERR(guest_page)) {
1477 ret = PTR_ERR(guest_page);
Sean Christopherson238eca82021-04-06 15:49:52 -07001478 goto e_free_trans;
Sean Christophersonc7a1b2b2021-05-06 10:58:26 -07001479 }
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001480
1481 /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
Sean Christopherson238eca82021-04-06 15:49:52 -07001482 data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1483 data.guest_address |= sev_me_mask;
1484 data.guest_len = params.guest_len;
1485 data.handle = sev->handle;
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001486
Sean Christopherson238eca82021-04-06 15:49:52 -07001487 ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001488 &argp->error);
1489
1490 sev_unpin_memory(kvm, guest_page, n);
1491
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001492e_free_trans:
1493 kfree(trans);
1494e_free_hdr:
1495 kfree(hdr);
1496
1497 return ret;
1498}
1499
Brijesh Singh6a443de2021-04-15 15:55:40 +00001500static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1501{
1502 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
Sean Christopherson238eca82021-04-06 15:49:52 -07001503 struct sev_data_receive_finish data;
Brijesh Singh6a443de2021-04-15 15:55:40 +00001504
1505 if (!sev_guest(kvm))
1506 return -ENOTTY;
1507
Sean Christopherson238eca82021-04-06 15:49:52 -07001508 data.handle = sev->handle;
1509 return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
Brijesh Singh6a443de2021-04-15 15:55:40 +00001510}
1511
Sean Christopherson8e38e962021-11-09 21:51:01 +00001512static bool is_cmd_allowed_from_mirror(u32 cmd_id)
Peter Gonda5b92b6c2021-09-21 08:03:45 -07001513{
1514 /*
1515	 * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
1516	 * on active mirror VMs. Also allow the debugging and status commands.
1517 */
1518 if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
1519 cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
1520 cmd_id == KVM_SEV_DBG_ENCRYPT)
1521 return true;
1522
1523 return false;
1524}
1525
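/*
 * Top-level dispatcher for the KVM_MEMORY_ENCRYPT_OP ioctl issued on a VM fd.
 * Minimal userspace sketch (illustrative only; error handling omitted, vm_fd
 * and sev_fd are the caller's handles, sev_fd being an open fd of /dev/sev;
 * struct kvm_sev_cmd is from <linux/kvm.h>):
 *
 *	struct kvm_sev_cmd cmd = {
 *		.id     = KVM_SEV_INIT,
 *		.sev_fd = sev_fd,
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 *
 * On failure, cmd.error carries the SEV firmware error code back to userspace.
 */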
Joerg Roedeleaf78262020-03-24 10:41:54 +01001526int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
1527{
1528 struct kvm_sev_cmd sev_cmd;
1529 int r;
1530
Sean Christophersona5c1c5a2021-04-21 19:11:23 -07001531 if (!sev_enabled)
Joerg Roedeleaf78262020-03-24 10:41:54 +01001532 return -ENOTTY;
1533
1534 if (!argp)
1535 return 0;
1536
1537 if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
1538 return -EFAULT;
1539
1540 mutex_lock(&kvm->lock);
1541
Peter Gonda5b92b6c2021-09-21 08:03:45 -07001542 /* Only the enc_context_owner handles some memory enc operations. */
1543 if (is_mirroring_enc_context(kvm) &&
Sean Christopherson8e38e962021-11-09 21:51:01 +00001544 !is_cmd_allowed_from_mirror(sev_cmd.id)) {
Nathan Tempelman54526d12021-04-08 22:32:14 +00001545 r = -EINVAL;
1546 goto out;
1547 }
1548
Joerg Roedeleaf78262020-03-24 10:41:54 +01001549 switch (sev_cmd.id) {
Sean Christopherson9fa15212021-03-30 20:19:35 -07001550 case KVM_SEV_ES_INIT:
Sean Christopherson8d364a02021-04-21 19:11:17 -07001551 if (!sev_es_enabled) {
Sean Christopherson9fa15212021-03-30 20:19:35 -07001552 r = -ENOTTY;
1553 goto out;
1554 }
1555 fallthrough;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001556 case KVM_SEV_INIT:
1557 r = sev_guest_init(kvm, &sev_cmd);
1558 break;
1559 case KVM_SEV_LAUNCH_START:
1560 r = sev_launch_start(kvm, &sev_cmd);
1561 break;
1562 case KVM_SEV_LAUNCH_UPDATE_DATA:
1563 r = sev_launch_update_data(kvm, &sev_cmd);
1564 break;
Tom Lendackyad731092020-12-10 11:10:09 -06001565 case KVM_SEV_LAUNCH_UPDATE_VMSA:
1566 r = sev_launch_update_vmsa(kvm, &sev_cmd);
1567 break;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001568 case KVM_SEV_LAUNCH_MEASURE:
1569 r = sev_launch_measure(kvm, &sev_cmd);
1570 break;
1571 case KVM_SEV_LAUNCH_FINISH:
1572 r = sev_launch_finish(kvm, &sev_cmd);
1573 break;
1574 case KVM_SEV_GUEST_STATUS:
1575 r = sev_guest_status(kvm, &sev_cmd);
1576 break;
1577 case KVM_SEV_DBG_DECRYPT:
1578 r = sev_dbg_crypt(kvm, &sev_cmd, true);
1579 break;
1580 case KVM_SEV_DBG_ENCRYPT:
1581 r = sev_dbg_crypt(kvm, &sev_cmd, false);
1582 break;
1583 case KVM_SEV_LAUNCH_SECRET:
1584 r = sev_launch_secret(kvm, &sev_cmd);
1585 break;
Brijesh Singh2c07ded2021-01-04 09:17:49 -06001586 case KVM_SEV_GET_ATTESTATION_REPORT:
1587 r = sev_get_attestation_report(kvm, &sev_cmd);
1588 break;
Brijesh Singh4cfdd472021-04-15 15:53:14 +00001589 case KVM_SEV_SEND_START:
1590 r = sev_send_start(kvm, &sev_cmd);
1591 break;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001592 case KVM_SEV_SEND_UPDATE_DATA:
1593 r = sev_send_update_data(kvm, &sev_cmd);
1594 break;
Brijesh Singhfddecf62021-04-15 15:54:15 +00001595 case KVM_SEV_SEND_FINISH:
1596 r = sev_send_finish(kvm, &sev_cmd);
1597 break;
Steve Rutherford5569e2e2021-04-20 05:01:20 -04001598 case KVM_SEV_SEND_CANCEL:
1599 r = sev_send_cancel(kvm, &sev_cmd);
1600 break;
Brijesh Singhaf43cbb2021-04-15 15:54:50 +00001601 case KVM_SEV_RECEIVE_START:
1602 r = sev_receive_start(kvm, &sev_cmd);
1603 break;
Brijesh Singh15fb7de2021-04-15 15:55:17 +00001604 case KVM_SEV_RECEIVE_UPDATE_DATA:
1605 r = sev_receive_update_data(kvm, &sev_cmd);
1606 break;
Brijesh Singh6a443de2021-04-15 15:55:40 +00001607 case KVM_SEV_RECEIVE_FINISH:
1608 r = sev_receive_finish(kvm, &sev_cmd);
1609 break;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001610 default:
1611 r = -EINVAL;
1612 goto out;
1613 }
1614
1615 if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
1616 r = -EFAULT;
1617
1618out:
1619 mutex_unlock(&kvm->lock);
1620 return r;
1621}
1622
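/*
 * Backs the KVM_MEMORY_ENCRYPT_REG_REGION ioctl: pin the userspace range so
 * the guest's encrypted pages keep a stable host physical address.
 * Illustrative userspace call (sketch only; hva and len are the caller's):
 *
 *	struct kvm_enc_region region = {
 *		.addr = (__u64)hva,
 *		.size = len,
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &region);
 */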
1623int svm_register_enc_region(struct kvm *kvm,
1624 struct kvm_enc_region *range)
1625{
1626 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1627 struct enc_region *region;
1628 int ret = 0;
1629
1630 if (!sev_guest(kvm))
1631 return -ENOTTY;
1632
Nathan Tempelman54526d12021-04-08 22:32:14 +00001633	/* If kvm is mirroring an encryption context, it isn't responsible for it */
1634 if (is_mirroring_enc_context(kvm))
1635 return -EINVAL;
1636
Joerg Roedeleaf78262020-03-24 10:41:54 +01001637 if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
1638 return -EINVAL;
1639
1640 region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
1641 if (!region)
1642 return -ENOMEM;
1643
Peter Gonda19a23da2021-01-27 08:15:24 -08001644 mutex_lock(&kvm->lock);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001645 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
Paolo Bonzinia8d908b2020-06-23 05:12:24 -04001646 if (IS_ERR(region->pages)) {
1647 ret = PTR_ERR(region->pages);
Peter Gonda19a23da2021-01-27 08:15:24 -08001648 mutex_unlock(&kvm->lock);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001649 goto e_free;
1650 }
1651
Peter Gonda19a23da2021-01-27 08:15:24 -08001652 region->uaddr = range->addr;
1653 region->size = range->size;
1654
1655 list_add_tail(&region->list, &sev->regions_list);
1656 mutex_unlock(&kvm->lock);
1657
Joerg Roedeleaf78262020-03-24 10:41:54 +01001658 /*
1659 * The guest may change the memory encryption attribute from C=0 -> C=1
1660	 * or vice versa for this memory range. Let's make sure caches are
1661	 * flushed to ensure that guest data gets written into memory with the
1662	 * correct C-bit.
1663 */
1664 sev_clflush_pages(region->pages, region->npages);
1665
Joerg Roedeleaf78262020-03-24 10:41:54 +01001666 return ret;
1667
1668e_free:
1669 kfree(region);
1670 return ret;
1671}
1672
1673static struct enc_region *
1674find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
1675{
1676 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1677 struct list_head *head = &sev->regions_list;
1678 struct enc_region *i;
1679
1680 list_for_each_entry(i, head, list) {
1681 if (i->uaddr == range->addr &&
1682 i->size == range->size)
1683 return i;
1684 }
1685
1686 return NULL;
1687}
1688
1689static void __unregister_enc_region_locked(struct kvm *kvm,
1690 struct enc_region *region)
1691{
1692 sev_unpin_memory(kvm, region->pages, region->npages);
1693 list_del(&region->list);
1694 kfree(region);
1695}
1696
1697int svm_unregister_enc_region(struct kvm *kvm,
1698 struct kvm_enc_region *range)
1699{
1700 struct enc_region *region;
1701 int ret;
1702
Nathan Tempelman54526d12021-04-08 22:32:14 +00001703	/* If kvm is mirroring an encryption context, it isn't responsible for it */
1704 if (is_mirroring_enc_context(kvm))
1705 return -EINVAL;
1706
Joerg Roedeleaf78262020-03-24 10:41:54 +01001707 mutex_lock(&kvm->lock);
1708
1709 if (!sev_guest(kvm)) {
1710 ret = -ENOTTY;
1711 goto failed;
1712 }
1713
1714 region = find_enc_region(kvm, range);
1715 if (!region) {
1716 ret = -EINVAL;
1717 goto failed;
1718 }
1719
1720 /*
1721 * Ensure that all guest tagged cache entries are flushed before
1722 * releasing the pages back to the system for use. CLFLUSH will
1723 * not do this, so issue a WBINVD.
1724 */
1725 wbinvd_on_all_cpus();
1726
1727 __unregister_enc_region_locked(kvm, region);
1728
1729 mutex_unlock(&kvm->lock);
1730 return 0;
1731
1732failed:
1733 mutex_unlock(&kvm->lock);
1734 return ret;
1735}
1736
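/*
 * Create a "mirror" VM that shares the source VM's SEV ASID and firmware
 * handle, e.g. so SEV-ES vCPUs can be run from a separate VMM process.
 * Reached when userspace enables KVM_CAP_VM_COPY_ENC_CONTEXT_FROM on the new
 * VM, passing the source VM's fd (sketch of the wiring; see the capability
 * documentation for the exact ABI).
 */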
Nathan Tempelman54526d12021-04-08 22:32:14 +00001737int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
1738{
1739 struct file *source_kvm_file;
1740 struct kvm *source_kvm;
Peter Gondaf43c8872021-09-21 08:03:44 -07001741 struct kvm_sev_info source_sev, *mirror_sev;
Nathan Tempelman54526d12021-04-08 22:32:14 +00001742 int ret;
1743
1744 source_kvm_file = fget(source_fd);
1745 if (!file_is_kvm(source_kvm_file)) {
1746 ret = -EBADF;
1747 goto e_source_put;
1748 }
1749
1750 source_kvm = source_kvm_file->private_data;
1751 mutex_lock(&source_kvm->lock);
1752
1753 if (!sev_guest(source_kvm)) {
1754 ret = -EINVAL;
1755 goto e_source_unlock;
1756 }
1757
1758 /* Mirrors of mirrors should work, but let's not get silly */
1759 if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
1760 ret = -EINVAL;
1761 goto e_source_unlock;
1762 }
1763
Peter Gondaf43c8872021-09-21 08:03:44 -07001764 memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
1765 sizeof(source_sev));
Nathan Tempelman54526d12021-04-08 22:32:14 +00001766
1767 /*
1768	 * The mirror kvm holds an enc_context_owner ref so its ASID can't
1769	 * disappear until we're done with it.
1770 */
1771 kvm_get_kvm(source_kvm);
1772
1773 fput(source_kvm_file);
1774 mutex_unlock(&source_kvm->lock);
1775 mutex_lock(&kvm->lock);
1776
Sean Christopherson79b11142021-11-09 21:50:56 +00001777 /*
1778 * Disallow out-of-band SEV/SEV-ES init if the target is already an
1779 * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
1780 * created after SEV/SEV-ES initialization, e.g. to init intercepts.
1781 */
1782 if (sev_guest(kvm) || kvm->created_vcpus) {
Nathan Tempelman54526d12021-04-08 22:32:14 +00001783 ret = -EINVAL;
1784 goto e_mirror_unlock;
1785 }
1786
1787 /* Set enc_context_owner and copy its encryption context over */
1788 mirror_sev = &to_kvm_svm(kvm)->sev_info;
1789 mirror_sev->enc_context_owner = source_kvm;
Nathan Tempelman54526d12021-04-08 22:32:14 +00001790 mirror_sev->active = true;
Peter Gondaf43c8872021-09-21 08:03:44 -07001791 mirror_sev->asid = source_sev.asid;
1792 mirror_sev->fd = source_sev.fd;
1793 mirror_sev->es_active = source_sev.es_active;
1794 mirror_sev->handle = source_sev.handle;
1795 /*
1796	 * Do not copy ap_jump_table, since the mirror does not share the same
1797	 * KVM context as the original and the two may have different
1798	 * memory views.
1799 */
Nathan Tempelman54526d12021-04-08 22:32:14 +00001800
1801 mutex_unlock(&kvm->lock);
1802 return 0;
1803
1804e_mirror_unlock:
1805 mutex_unlock(&kvm->lock);
1806 kvm_put_kvm(source_kvm);
1807 return ret;
1808e_source_unlock:
1809 mutex_unlock(&source_kvm->lock);
1810e_source_put:
Colin Ian King8899a5f2021-04-30 18:03:03 +01001811 if (source_kvm_file)
1812 fput(source_kvm_file);
Nathan Tempelman54526d12021-04-08 22:32:14 +00001813 return ret;
1814}
1815
Joerg Roedeleaf78262020-03-24 10:41:54 +01001816void sev_vm_destroy(struct kvm *kvm)
1817{
1818 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1819 struct list_head *head = &sev->regions_list;
1820 struct list_head *pos, *q;
1821
1822 if (!sev_guest(kvm))
1823 return;
1824
Nathan Tempelman54526d12021-04-08 22:32:14 +00001825	/* If this is a mirror_kvm, release the enc_context_owner and skip SEV cleanup */
1826 if (is_mirroring_enc_context(kvm)) {
1827 kvm_put_kvm(sev->enc_context_owner);
1828 return;
1829 }
1830
Joerg Roedeleaf78262020-03-24 10:41:54 +01001831 mutex_lock(&kvm->lock);
1832
1833 /*
1834 * Ensure that all guest tagged cache entries are flushed before
1835 * releasing the pages back to the system for use. CLFLUSH will
1836 * not do this, so issue a WBINVD.
1837 */
1838 wbinvd_on_all_cpus();
1839
1840 /*
1841	 * If userspace was terminated before unregistering the memory regions,
1842	 * then let's unpin all the registered memory.
1843 */
1844 if (!list_empty(head)) {
1845 list_for_each_safe(pos, q, head) {
1846 __unregister_enc_region_locked(kvm,
1847 list_entry(pos, struct enc_region, list));
David Rientjes7be74942020-08-25 12:56:28 -07001848 cond_resched();
Joerg Roedeleaf78262020-03-24 10:41:54 +01001849 }
1850 }
1851
1852 mutex_unlock(&kvm->lock);
1853
1854 sev_unbind_asid(kvm, sev->handle);
Vipin Sharma7aef27f2021-03-29 21:42:06 -07001855 sev_asid_free(sev);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001856}
1857
Paolo Bonzinid9db0fd2021-04-21 19:11:15 -07001858void __init sev_set_cpu_caps(void)
1859{
Sean Christopherson8d364a02021-04-21 19:11:17 -07001860 if (!sev_enabled)
Paolo Bonzinid9db0fd2021-04-21 19:11:15 -07001861 kvm_cpu_cap_clear(X86_FEATURE_SEV);
Sean Christopherson8d364a02021-04-21 19:11:17 -07001862 if (!sev_es_enabled)
Paolo Bonzinid9db0fd2021-04-21 19:11:15 -07001863 kvm_cpu_cap_clear(X86_FEATURE_SEV_ES);
1864}
1865
Tom Lendacky916391a2020-12-10 11:09:38 -06001866void __init sev_hardware_setup(void)
Joerg Roedeleaf78262020-03-24 10:41:54 +01001867{
Sean Christophersona479c332021-04-21 19:11:18 -07001868#ifdef CONFIG_KVM_AMD_SEV
Vipin Sharma7aef27f2021-03-29 21:42:06 -07001869 unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
Tom Lendacky916391a2020-12-10 11:09:38 -06001870 bool sev_es_supported = false;
1871 bool sev_supported = false;
1872
Sean Christophersona479c332021-04-21 19:11:18 -07001873 if (!sev_enabled || !npt_enabled)
Sean Christophersone8126bd2021-04-21 19:11:14 -07001874 goto out;
1875
Tom Lendacky916391a2020-12-10 11:09:38 -06001876 /* Does the CPU support SEV? */
1877 if (!boot_cpu_has(X86_FEATURE_SEV))
1878 goto out;
1879
1880 /* Retrieve SEV CPUID information */
1881 cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
1882
Tom Lendacky1edc1452020-12-10 11:09:49 -06001883 /* Set encryption bit location for SEV-ES guests */
1884 sev_enc_bit = ebx & 0x3f;
1885
Joerg Roedeleaf78262020-03-24 10:41:54 +01001886 /* Maximum number of encrypted guests supported simultaneously */
Tom Lendacky916391a2020-12-10 11:09:38 -06001887 max_sev_asid = ecx;
Sean Christopherson8cb756b2021-04-21 19:11:21 -07001888 if (!max_sev_asid)
Tom Lendacky916391a2020-12-10 11:09:38 -06001889 goto out;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001890
1891 /* Minimum ASID value that should be used for SEV guest */
Tom Lendacky916391a2020-12-10 11:09:38 -06001892 min_sev_asid = edx;
Brijesh Singhd3d1af82021-04-15 15:53:55 +00001893 sev_me_mask = 1UL << (ebx & 0x3f);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001894
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -07001895 /*
1896 * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
1897 * even though it's never used, so that the bitmap is indexed by the
1898 * actual ASID.
1899 */
1900 nr_asids = max_sev_asid + 1;
1901 sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001902 if (!sev_asid_bitmap)
Tom Lendacky916391a2020-12-10 11:09:38 -06001903 goto out;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001904
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -07001905 sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
Sean Christophersonf31b88b2021-04-21 19:11:12 -07001906 if (!sev_reclaim_asid_bitmap) {
1907 bitmap_free(sev_asid_bitmap);
1908 sev_asid_bitmap = NULL;
Tom Lendacky916391a2020-12-10 11:09:38 -06001909 goto out;
Sean Christophersonf31b88b2021-04-21 19:11:12 -07001910 }
Joerg Roedeleaf78262020-03-24 10:41:54 +01001911
Vipin Sharma7aef27f2021-03-29 21:42:06 -07001912 sev_asid_count = max_sev_asid - min_sev_asid + 1;
1913 if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count))
1914 goto out;
1915
1916 pr_info("SEV supported: %u ASIDs\n", sev_asid_count);
Tom Lendacky916391a2020-12-10 11:09:38 -06001917 sev_supported = true;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001918
Tom Lendacky916391a2020-12-10 11:09:38 -06001919 /* SEV-ES support requested? */
Sean Christopherson8d364a02021-04-21 19:11:17 -07001920 if (!sev_es_enabled)
Tom Lendacky916391a2020-12-10 11:09:38 -06001921 goto out;
1922
1923 /* Does the CPU support SEV-ES? */
1924 if (!boot_cpu_has(X86_FEATURE_SEV_ES))
1925 goto out;
1926
1927 /* Has the system been allocated ASIDs for SEV-ES? */
1928 if (min_sev_asid == 1)
1929 goto out;
1930
Vipin Sharma7aef27f2021-03-29 21:42:06 -07001931 sev_es_asid_count = min_sev_asid - 1;
1932 if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count))
1933 goto out;
1934
1935 pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count);
Tom Lendacky916391a2020-12-10 11:09:38 -06001936 sev_es_supported = true;
1937
1938out:
Sean Christopherson8d364a02021-04-21 19:11:17 -07001939 sev_enabled = sev_supported;
1940 sev_es_enabled = sev_es_supported;
Sean Christophersona479c332021-04-21 19:11:18 -07001941#endif
Joerg Roedeleaf78262020-03-24 10:41:54 +01001942}
1943
1944void sev_hardware_teardown(void)
1945{
Sean Christophersona5c1c5a2021-04-21 19:11:23 -07001946 if (!sev_enabled)
Paolo Bonzini9ef15302020-04-13 03:20:06 -04001947 return;
1948
Sean Christopherson469bb322021-04-21 19:11:25 -07001949 /* No need to take sev_bitmap_lock, all VMs have been destroyed. */
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -07001950 sev_flush_asids(1, max_sev_asid);
Sean Christopherson469bb322021-04-21 19:11:25 -07001951
Joerg Roedeleaf78262020-03-24 10:41:54 +01001952 bitmap_free(sev_asid_bitmap);
1953 bitmap_free(sev_reclaim_asid_bitmap);
Sean Christopherson469bb322021-04-21 19:11:25 -07001954
Vipin Sharma7aef27f2021-03-29 21:42:06 -07001955 misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
1956 misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
Joerg Roedeleaf78262020-03-24 10:41:54 +01001957}
Joerg Roedeleaf78262020-03-24 10:41:54 +01001958
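/*
 * Allocate the per-CPU sev_vmcbs[] array, indexed by ASID.  pre_sev_run()
 * uses it to detect when a different VMCB was last run with a given ASID on
 * this CPU, in which case a TLB flush is required.
 */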
Sean Christophersonb95c2212021-04-21 19:11:22 -07001959int sev_cpu_init(struct svm_cpu_data *sd)
1960{
Sean Christophersona5c1c5a2021-04-21 19:11:23 -07001961 if (!sev_enabled)
Sean Christophersonb95c2212021-04-21 19:11:22 -07001962 return 0;
1963
Mingwei Zhangbb2baeb2021-08-02 11:09:03 -07001964 sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
Sean Christophersonb95c2212021-04-21 19:11:22 -07001965 if (!sd->sev_vmcbs)
1966 return -ENOMEM;
1967
1968 return 0;
Joerg Roedeleaf78262020-03-24 10:41:54 +01001969}
1970
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06001971/*
1972 * Pages used by hardware to hold guest encrypted state must be flushed before
1973 * returning them to the system.
1974 */
1975static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
1976 unsigned long len)
1977{
1978 /*
1979 * If hardware enforced cache coherency for encrypted mappings of the
1980 * same physical page is supported, nothing to do.
1981 */
1982 if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
1983 return;
1984
1985 /*
1986 * If the VM Page Flush MSR is supported, use it to flush the page
1987 * (using the page virtual address and the guest ASID).
1988 */
1989 if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
1990 struct kvm_sev_info *sev;
1991 unsigned long va_start;
1992 u64 start, stop;
1993
1994 /* Align start and stop to page boundaries. */
1995 va_start = (unsigned long)va;
1996 start = (u64)va_start & PAGE_MASK;
1997 stop = PAGE_ALIGN((u64)va_start + len);
1998
1999 if (start < stop) {
2000 sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
2001
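			/*
			 * Per the APM, VM_PAGE_FLUSH takes the page-aligned
			 * virtual address in its upper bits and the target
			 * ASID in its low bits, hence "start | sev->asid".
			 */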
2002 while (start < stop) {
2003 wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
2004 start | sev->asid);
2005
2006 start += PAGE_SIZE;
2007 }
2008
2009 return;
2010 }
2011
2012 WARN(1, "Address overflow, using WBINVD\n");
2013 }
2014
2015 /*
2016 * Hardware should always have one of the above features,
2017 * but if not, use WBINVD and issue a warning.
2018 */
2019 WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
2020 wbinvd_on_all_cpus();
2021}
2022
2023void sev_free_vcpu(struct kvm_vcpu *vcpu)
2024{
2025 struct vcpu_svm *svm;
2026
2027 if (!sev_es_guest(vcpu->kvm))
2028 return;
2029
2030 svm = to_svm(vcpu);
2031
2032 if (vcpu->arch.guest_state_protected)
2033 sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
2034 __free_page(virt_to_page(svm->vmsa));
Tom Lendacky8f423a82020-12-10 11:09:53 -06002035
2036 if (svm->ghcb_sa_free)
2037 kfree(svm->ghcb_sa);
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06002038}
2039
Tom Lendacky291bd202020-12-10 11:09:47 -06002040static void dump_ghcb(struct vcpu_svm *svm)
2041{
2042 struct ghcb *ghcb = svm->ghcb;
2043 unsigned int nbits;
2044
2045 /* Re-use the dump_invalid_vmcb module parameter */
2046 if (!dump_invalid_vmcb) {
2047 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
2048 return;
2049 }
2050
2051 nbits = sizeof(ghcb->save.valid_bitmap) * 8;
2052
2053 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
2054 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
2055 ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
2056 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
2057 ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
2058 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
2059 ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
2060 pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
2061 ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
2062 pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
2063}
2064
2065static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
2066{
2067 struct kvm_vcpu *vcpu = &svm->vcpu;
2068 struct ghcb *ghcb = svm->ghcb;
2069
2070 /*
2071 * The GHCB protocol so far allows for the following data
2072 * to be returned:
2073 * GPRs RAX, RBX, RCX, RDX
2074 *
Sean Christopherson25009142021-01-22 15:50:47 -08002075 * Copy their values, even if they may not have been written during the
2076 * VM-Exit. It's the guest's responsibility to not consume random data.
Tom Lendacky291bd202020-12-10 11:09:47 -06002077 */
Sean Christopherson25009142021-01-22 15:50:47 -08002078 ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
2079 ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
2080 ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
2081 ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
Tom Lendacky291bd202020-12-10 11:09:47 -06002082}
2083
2084static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
2085{
2086 struct vmcb_control_area *control = &svm->vmcb->control;
2087 struct kvm_vcpu *vcpu = &svm->vcpu;
2088 struct ghcb *ghcb = svm->ghcb;
2089 u64 exit_code;
2090
2091 /*
2092 * The GHCB protocol so far allows for the following data
2093 * to be supplied:
2094 * GPRs RAX, RBX, RCX, RDX
2095 * XCR0
2096 * CPL
2097 *
2098 * VMMCALL allows the guest to provide extra registers. KVM also
2099 * expects RSI for hypercalls, so include that, too.
2100 *
2101 * Copy their values to the appropriate location if supplied.
2102 */
2103 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
2104
2105 vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
2106 vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
2107 vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
2108 vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
2109 vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
2110
2111 svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
2112
2113 if (ghcb_xcr0_is_valid(ghcb)) {
2114 vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
2115 kvm_update_cpuid_runtime(vcpu);
2116 }
2117
2118 /* Copy the GHCB exit information into the VMCB fields */
2119 exit_code = ghcb_get_sw_exit_code(ghcb);
2120 control->exit_code = lower_32_bits(exit_code);
2121 control->exit_code_hi = upper_32_bits(exit_code);
2122 control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
2123 control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
2124
2125 /* Clear the valid entries fields */
2126 memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
2127}
2128
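/*
 * Sanity check a VMGEXIT request: the guest must have populated every GHCB
 * field that the requested exit code consumes (tracked via the GHCB valid
 * bitmap); otherwise the exit is rejected and reported to userspace as an
 * internal error.
 */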
2129static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
2130{
2131 struct kvm_vcpu *vcpu;
2132 struct ghcb *ghcb;
2133 u64 exit_code = 0;
2134
2135 ghcb = svm->ghcb;
2136
2137 /* Only GHCB Usage code 0 is supported */
2138 if (ghcb->ghcb_usage)
2139 goto vmgexit_err;
2140
2141 /*
2142	 * Retrieve the exit code now even though it may not be marked valid,
2143 * as it could help with debugging.
2144 */
2145 exit_code = ghcb_get_sw_exit_code(ghcb);
2146
2147 if (!ghcb_sw_exit_code_is_valid(ghcb) ||
2148 !ghcb_sw_exit_info_1_is_valid(ghcb) ||
2149 !ghcb_sw_exit_info_2_is_valid(ghcb))
2150 goto vmgexit_err;
2151
2152 switch (ghcb_get_sw_exit_code(ghcb)) {
2153 case SVM_EXIT_READ_DR7:
2154 break;
2155 case SVM_EXIT_WRITE_DR7:
2156 if (!ghcb_rax_is_valid(ghcb))
2157 goto vmgexit_err;
2158 break;
2159 case SVM_EXIT_RDTSC:
2160 break;
2161 case SVM_EXIT_RDPMC:
2162 if (!ghcb_rcx_is_valid(ghcb))
2163 goto vmgexit_err;
2164 break;
2165 case SVM_EXIT_CPUID:
2166 if (!ghcb_rax_is_valid(ghcb) ||
2167 !ghcb_rcx_is_valid(ghcb))
2168 goto vmgexit_err;
2169 if (ghcb_get_rax(ghcb) == 0xd)
2170 if (!ghcb_xcr0_is_valid(ghcb))
2171 goto vmgexit_err;
2172 break;
2173 case SVM_EXIT_INVD:
2174 break;
2175 case SVM_EXIT_IOIO:
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002176 if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
2177 if (!ghcb_sw_scratch_is_valid(ghcb))
Tom Lendacky291bd202020-12-10 11:09:47 -06002178 goto vmgexit_err;
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002179 } else {
2180 if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
2181 if (!ghcb_rax_is_valid(ghcb))
2182 goto vmgexit_err;
2183 }
Tom Lendacky291bd202020-12-10 11:09:47 -06002184 break;
2185 case SVM_EXIT_MSR:
2186 if (!ghcb_rcx_is_valid(ghcb))
2187 goto vmgexit_err;
2188 if (ghcb_get_sw_exit_info_1(ghcb)) {
2189 if (!ghcb_rax_is_valid(ghcb) ||
2190 !ghcb_rdx_is_valid(ghcb))
2191 goto vmgexit_err;
2192 }
2193 break;
2194 case SVM_EXIT_VMMCALL:
2195 if (!ghcb_rax_is_valid(ghcb) ||
2196 !ghcb_cpl_is_valid(ghcb))
2197 goto vmgexit_err;
2198 break;
2199 case SVM_EXIT_RDTSCP:
2200 break;
2201 case SVM_EXIT_WBINVD:
2202 break;
2203 case SVM_EXIT_MONITOR:
2204 if (!ghcb_rax_is_valid(ghcb) ||
2205 !ghcb_rcx_is_valid(ghcb) ||
2206 !ghcb_rdx_is_valid(ghcb))
2207 goto vmgexit_err;
2208 break;
2209 case SVM_EXIT_MWAIT:
2210 if (!ghcb_rax_is_valid(ghcb) ||
2211 !ghcb_rcx_is_valid(ghcb))
2212 goto vmgexit_err;
2213 break;
Tom Lendacky8f423a82020-12-10 11:09:53 -06002214 case SVM_VMGEXIT_MMIO_READ:
2215 case SVM_VMGEXIT_MMIO_WRITE:
2216 if (!ghcb_sw_scratch_is_valid(ghcb))
2217 goto vmgexit_err;
2218 break;
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002219 case SVM_VMGEXIT_NMI_COMPLETE:
Tom Lendacky647daca2021-01-04 14:20:01 -06002220 case SVM_VMGEXIT_AP_HLT_LOOP:
Tom Lendacky8640ca52020-12-15 12:44:07 -05002221 case SVM_VMGEXIT_AP_JUMP_TABLE:
Tom Lendacky291bd202020-12-10 11:09:47 -06002222 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
2223 break;
2224 default:
2225 goto vmgexit_err;
2226 }
2227
2228 return 0;
2229
2230vmgexit_err:
2231 vcpu = &svm->vcpu;
2232
2233 if (ghcb->ghcb_usage) {
2234 vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
2235 ghcb->ghcb_usage);
2236 } else {
2237 vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
2238 exit_code);
2239 dump_ghcb(svm);
2240 }
2241
2242 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2243 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
2244 vcpu->run->internal.ndata = 2;
2245 vcpu->run->internal.data[0] = exit_code;
2246 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
2247
2248 return -EINVAL;
2249}
2250
Tom Lendackyce7ea0c2021-05-06 15:14:41 -05002251void sev_es_unmap_ghcb(struct vcpu_svm *svm)
Tom Lendacky291bd202020-12-10 11:09:47 -06002252{
2253 if (!svm->ghcb)
2254 return;
2255
Tom Lendacky8f423a82020-12-10 11:09:53 -06002256 if (svm->ghcb_sa_free) {
2257 /*
2258 * The scratch area lives outside the GHCB, so there is a
2259 * buffer that, depending on the operation performed, may
2260 * need to be synced, then freed.
2261 */
2262 if (svm->ghcb_sa_sync) {
2263 kvm_write_guest(svm->vcpu.kvm,
2264 ghcb_get_sw_scratch(svm->ghcb),
2265 svm->ghcb_sa, svm->ghcb_sa_len);
2266 svm->ghcb_sa_sync = false;
2267 }
2268
2269 kfree(svm->ghcb_sa);
2270 svm->ghcb_sa = NULL;
2271 svm->ghcb_sa_free = false;
2272 }
2273
Tom Lendackyd523ab6b2020-12-10 11:09:48 -06002274 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);
2275
Tom Lendacky291bd202020-12-10 11:09:47 -06002276 sev_es_sync_to_ghcb(svm);
2277
2278 kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
2279 svm->ghcb = NULL;
2280}
2281
Joerg Roedeleaf78262020-03-24 10:41:54 +01002282void pre_sev_run(struct vcpu_svm *svm, int cpu)
2283{
2284 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2285 int asid = sev_get_asid(svm->vcpu.kvm);
2286
2287 /* Assign the asid allocated with this SEV guest */
Paolo Bonzinidee734a2020-11-30 09:39:59 -05002288 svm->asid = asid;
Joerg Roedeleaf78262020-03-24 10:41:54 +01002289
2290 /*
2291 * Flush guest TLB:
2292 *
2293	 * 1) when a different VMCB for the same ASID is to be run on the same host CPU.
2294	 * 2) or this VMCB was executed on a different host CPU in previous VMRUNs.
2295 */
2296 if (sd->sev_vmcbs[asid] == svm->vmcb &&
Jim Mattson8a14fe42020-06-03 16:56:22 -07002297 svm->vcpu.arch.last_vmentry_cpu == cpu)
Joerg Roedeleaf78262020-03-24 10:41:54 +01002298 return;
2299
Joerg Roedeleaf78262020-03-24 10:41:54 +01002300 sd->sev_vmcbs[asid] = svm->vmcb;
2301 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
Joerg Roedel06e78522020-06-25 10:03:23 +02002302 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
Joerg Roedeleaf78262020-03-24 10:41:54 +01002303}
Tom Lendacky291bd202020-12-10 11:09:47 -06002304
Tom Lendacky8f423a82020-12-10 11:09:53 -06002305#define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
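/*
 * Resolve the guest-supplied scratch area for a VMGEXIT.  Two layouts are
 * accepted: the scratch buffer lies entirely within the GHCB's shared_buffer
 * and is used in place, or it lies elsewhere in guest memory and is copied
 * into a kernel bounce buffer (capped at GHCB_SCRATCH_AREA_LIMIT) that is
 * optionally written back when the GHCB is unmapped.
 */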
2306static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
2307{
2308 struct vmcb_control_area *control = &svm->vmcb->control;
2309 struct ghcb *ghcb = svm->ghcb;
2310 u64 ghcb_scratch_beg, ghcb_scratch_end;
2311 u64 scratch_gpa_beg, scratch_gpa_end;
2312 void *scratch_va;
2313
2314 scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
2315 if (!scratch_gpa_beg) {
2316 pr_err("vmgexit: scratch gpa not provided\n");
2317 return false;
2318 }
2319
2320 scratch_gpa_end = scratch_gpa_beg + len;
2321 if (scratch_gpa_end < scratch_gpa_beg) {
2322 pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
2323 len, scratch_gpa_beg);
2324 return false;
2325 }
2326
2327 if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
2328 /* Scratch area begins within GHCB */
2329 ghcb_scratch_beg = control->ghcb_gpa +
2330 offsetof(struct ghcb, shared_buffer);
2331 ghcb_scratch_end = control->ghcb_gpa +
2332 offsetof(struct ghcb, reserved_1);
2333
2334 /*
2335 * If the scratch area begins within the GHCB, it must be
2336 * completely contained in the GHCB shared buffer area.
2337 */
2338 if (scratch_gpa_beg < ghcb_scratch_beg ||
2339 scratch_gpa_end > ghcb_scratch_end) {
2340 pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
2341 scratch_gpa_beg, scratch_gpa_end);
2342 return false;
2343 }
2344
2345 scratch_va = (void *)svm->ghcb;
2346 scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
2347 } else {
2348 /*
2349 * The guest memory must be read into a kernel buffer, so
2350 * limit the size
2351 */
2352 if (len > GHCB_SCRATCH_AREA_LIMIT) {
2353 pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
2354 len, GHCB_SCRATCH_AREA_LIMIT);
2355 return false;
2356 }
Sean Christophersoneba04b22021-03-30 19:30:25 -07002357 scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
Tom Lendacky8f423a82020-12-10 11:09:53 -06002358 if (!scratch_va)
2359 return false;
2360
2361 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
2362 /* Unable to copy scratch area from guest */
2363 pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
2364
2365 kfree(scratch_va);
2366 return false;
2367 }
2368
2369 /*
2370 * The scratch area is outside the GHCB. The operation will
2371 * dictate whether the buffer needs to be synced before running
2372 * the vCPU next time (i.e. a read was requested so the data
2373 * must be written back to the guest memory).
2374 */
2375 svm->ghcb_sa_sync = sync;
2376 svm->ghcb_sa_free = true;
2377 }
2378
2379 svm->ghcb_sa = scratch_va;
2380 svm->ghcb_sa_len = len;
2381
2382 return true;
2383}
2384
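/*
 * Helpers for the GHCB MSR protocol: vmcb->control.ghcb_gpa doubles as the
 * GHCB MSR value, so requests and responses are packed into it as
 * (value & mask) << pos bitfields.
 */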
Tom Lendackyd3694662020-12-10 11:09:50 -06002385static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
2386 unsigned int pos)
2387{
2388 svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
2389 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
2390}
2391
2392static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
2393{
2394 return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
2395}
2396
Tom Lendacky1edc1452020-12-10 11:09:49 -06002397static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
2398{
2399 svm->vmcb->control.ghcb_gpa = value;
2400}
2401
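/*
 * Handle a VMGEXIT issued while the guest is still using the GHCB MSR
 * protocol, i.e. before a GHCB page has been registered.  The low bits of
 * the MSR value (GHCB_MSR_INFO_MASK) select the request and the remaining
 * bits carry request-specific data, extracted below with get_ghcb_msr_bits().
 */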
Tom Lendacky291bd202020-12-10 11:09:47 -06002402static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
2403{
Tom Lendacky1edc1452020-12-10 11:09:49 -06002404 struct vmcb_control_area *control = &svm->vmcb->control;
Tom Lendackyd3694662020-12-10 11:09:50 -06002405 struct kvm_vcpu *vcpu = &svm->vcpu;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002406 u64 ghcb_info;
Tom Lendackyd3694662020-12-10 11:09:50 -06002407 int ret = 1;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002408
2409 ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;
2410
Tom Lendacky59e38b52020-12-10 11:09:52 -06002411 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
2412 control->ghcb_gpa);
2413
Tom Lendacky1edc1452020-12-10 11:09:49 -06002414 switch (ghcb_info) {
2415 case GHCB_MSR_SEV_INFO_REQ:
2416 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2417 GHCB_VERSION_MIN,
2418 sev_enc_bit));
2419 break;
Tom Lendackyd3694662020-12-10 11:09:50 -06002420 case GHCB_MSR_CPUID_REQ: {
2421 u64 cpuid_fn, cpuid_reg, cpuid_value;
2422
2423 cpuid_fn = get_ghcb_msr_bits(svm,
2424 GHCB_MSR_CPUID_FUNC_MASK,
2425 GHCB_MSR_CPUID_FUNC_POS);
2426
2427 /* Initialize the registers needed by the CPUID intercept */
2428 vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
2429 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2430
Paolo Bonzini63129752021-03-02 14:40:39 -05002431 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
Tom Lendackyd3694662020-12-10 11:09:50 -06002432 if (!ret) {
2433 ret = -EINVAL;
2434 break;
2435 }
2436
2437 cpuid_reg = get_ghcb_msr_bits(svm,
2438 GHCB_MSR_CPUID_REG_MASK,
2439 GHCB_MSR_CPUID_REG_POS);
2440 if (cpuid_reg == 0)
2441 cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
2442 else if (cpuid_reg == 1)
2443 cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
2444 else if (cpuid_reg == 2)
2445 cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
2446 else
2447 cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];
2448
2449 set_ghcb_msr_bits(svm, cpuid_value,
2450 GHCB_MSR_CPUID_VALUE_MASK,
2451 GHCB_MSR_CPUID_VALUE_POS);
2452
2453 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
2454 GHCB_MSR_INFO_MASK,
2455 GHCB_MSR_INFO_POS);
2456 break;
2457 }
Tom Lendackye1d71112020-12-10 11:09:51 -06002458 case GHCB_MSR_TERM_REQ: {
2459 u64 reason_set, reason_code;
2460
2461 reason_set = get_ghcb_msr_bits(svm,
2462 GHCB_MSR_TERM_REASON_SET_MASK,
2463 GHCB_MSR_TERM_REASON_SET_POS);
2464 reason_code = get_ghcb_msr_bits(svm,
2465 GHCB_MSR_TERM_REASON_MASK,
2466 GHCB_MSR_TERM_REASON_POS);
2467 pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
2468 reason_set, reason_code);
2469 fallthrough;
2470 }
Tom Lendacky1edc1452020-12-10 11:09:49 -06002471 default:
Tom Lendackyd3694662020-12-10 11:09:50 -06002472 ret = -EINVAL;
Tom Lendacky1edc1452020-12-10 11:09:49 -06002473 }
2474
Tom Lendacky59e38b52020-12-10 11:09:52 -06002475 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
2476 control->ghcb_gpa, ret);
2477
Tom Lendackyd3694662020-12-10 11:09:50 -06002478 return ret;
Tom Lendacky291bd202020-12-10 11:09:47 -06002479}
2480
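/*
 * Main VMGEXIT handler: map the guest's GHCB, validate the request, sync the
 * advertised register state into the vCPU/VMCB, then either handle the
 * SEV-ES specific exit codes here or hand the exit to the standard SVM
 * handlers via svm_invoke_exit_handler().
 */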
Paolo Bonzini63129752021-03-02 14:40:39 -05002481int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
Tom Lendacky291bd202020-12-10 11:09:47 -06002482{
Paolo Bonzini63129752021-03-02 14:40:39 -05002483 struct vcpu_svm *svm = to_svm(vcpu);
Tom Lendacky291bd202020-12-10 11:09:47 -06002484 struct vmcb_control_area *control = &svm->vmcb->control;
2485 u64 ghcb_gpa, exit_code;
2486 struct ghcb *ghcb;
2487 int ret;
2488
2489 /* Validate the GHCB */
2490 ghcb_gpa = control->ghcb_gpa;
2491 if (ghcb_gpa & GHCB_MSR_INFO_MASK)
2492 return sev_handle_vmgexit_msr_protocol(svm);
2493
2494 if (!ghcb_gpa) {
Paolo Bonzini63129752021-03-02 14:40:39 -05002495 vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
Tom Lendacky291bd202020-12-10 11:09:47 -06002496 return -EINVAL;
2497 }
2498
Paolo Bonzini63129752021-03-02 14:40:39 -05002499 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
Tom Lendacky291bd202020-12-10 11:09:47 -06002500 /* Unable to map GHCB from guest */
Paolo Bonzini63129752021-03-02 14:40:39 -05002501 vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
Tom Lendacky291bd202020-12-10 11:09:47 -06002502 ghcb_gpa);
2503 return -EINVAL;
2504 }
2505
2506 svm->ghcb = svm->ghcb_map.hva;
2507 ghcb = svm->ghcb_map.hva;
2508
Paolo Bonzini63129752021-03-02 14:40:39 -05002509 trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
Tom Lendackyd523ab6b2020-12-10 11:09:48 -06002510
Tom Lendacky291bd202020-12-10 11:09:47 -06002511 exit_code = ghcb_get_sw_exit_code(ghcb);
2512
2513 ret = sev_es_validate_vmgexit(svm);
2514 if (ret)
2515 return ret;
2516
2517 sev_es_sync_from_ghcb(svm);
2518 ghcb_set_sw_exit_info_1(ghcb, 0);
2519 ghcb_set_sw_exit_info_2(ghcb, 0);
2520
2521 ret = -EINVAL;
2522 switch (exit_code) {
Tom Lendacky8f423a82020-12-10 11:09:53 -06002523 case SVM_VMGEXIT_MMIO_READ:
2524 if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
2525 break;
2526
Paolo Bonzini63129752021-03-02 14:40:39 -05002527 ret = kvm_sev_es_mmio_read(vcpu,
Tom Lendacky8f423a82020-12-10 11:09:53 -06002528 control->exit_info_1,
2529 control->exit_info_2,
2530 svm->ghcb_sa);
2531 break;
2532 case SVM_VMGEXIT_MMIO_WRITE:
2533 if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
2534 break;
2535
Paolo Bonzini63129752021-03-02 14:40:39 -05002536 ret = kvm_sev_es_mmio_write(vcpu,
Tom Lendacky8f423a82020-12-10 11:09:53 -06002537 control->exit_info_1,
2538 control->exit_info_2,
2539 svm->ghcb_sa);
2540 break;
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002541 case SVM_VMGEXIT_NMI_COMPLETE:
Paolo Bonzini63129752021-03-02 14:40:39 -05002542 ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002543 break;
Tom Lendacky647daca2021-01-04 14:20:01 -06002544 case SVM_VMGEXIT_AP_HLT_LOOP:
Paolo Bonzini63129752021-03-02 14:40:39 -05002545 ret = kvm_emulate_ap_reset_hold(vcpu);
Tom Lendacky647daca2021-01-04 14:20:01 -06002546 break;
Tom Lendacky8640ca52020-12-15 12:44:07 -05002547 case SVM_VMGEXIT_AP_JUMP_TABLE: {
Paolo Bonzini63129752021-03-02 14:40:39 -05002548 struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
Tom Lendacky8640ca52020-12-15 12:44:07 -05002549
2550 switch (control->exit_info_1) {
2551 case 0:
2552 /* Set AP jump table address */
2553 sev->ap_jump_table = control->exit_info_2;
2554 break;
2555 case 1:
2556 /* Get AP jump table address */
2557 ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
2558 break;
2559 default:
2560 pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
2561 control->exit_info_1);
2562 ghcb_set_sw_exit_info_1(ghcb, 1);
2563 ghcb_set_sw_exit_info_2(ghcb,
2564 X86_TRAP_UD |
2565 SVM_EVTINJ_TYPE_EXEPT |
2566 SVM_EVTINJ_VALID);
2567 }
2568
2569 ret = 1;
2570 break;
2571 }
Tom Lendacky291bd202020-12-10 11:09:47 -06002572 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
Paolo Bonzini63129752021-03-02 14:40:39 -05002573 vcpu_unimpl(vcpu,
Tom Lendacky291bd202020-12-10 11:09:47 -06002574 "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
2575 control->exit_info_1, control->exit_info_2);
2576 break;
2577 default:
Paolo Bonzini63129752021-03-02 14:40:39 -05002578 ret = svm_invoke_exit_handler(vcpu, exit_code);
Tom Lendacky291bd202020-12-10 11:09:47 -06002579 }
2580
2581 return ret;
2582}
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002583
2584int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
2585{
2586 if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
2587 return -EINVAL;
2588
2589 return kvm_sev_es_string_io(&svm->vcpu, size, port,
Paolo Bonzini019057b2021-10-12 11:07:59 -04002590 svm->ghcb_sa, svm->ghcb_sa_len / size, in);
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002591}
Tom Lendacky376c6d22020-12-10 11:10:06 -06002592
2593void sev_es_init_vmcb(struct vcpu_svm *svm)
2594{
2595 struct kvm_vcpu *vcpu = &svm->vcpu;
2596
2597 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
2598 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
2599
2600 /*
2601	 * An SEV-ES guest requires a VMSA area that is separate from the
2602 * VMCB page. Do not include the encryption mask on the VMSA physical
2603 * address since hardware will access it using the guest key.
2604 */
2605 svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);
2606
2607 /* Can't intercept CR register access, HV can't modify CR registers */
2608 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
2609 svm_clr_intercept(svm, INTERCEPT_CR4_READ);
2610 svm_clr_intercept(svm, INTERCEPT_CR8_READ);
2611 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
2612 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
2613 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
2614
2615 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);
2616
2617 /* Track EFER/CR register changes */
2618 svm_set_intercept(svm, TRAP_EFER_WRITE);
2619 svm_set_intercept(svm, TRAP_CR0_WRITE);
2620 svm_set_intercept(svm, TRAP_CR4_WRITE);
2621 svm_set_intercept(svm, TRAP_CR8_WRITE);
2622
2623 /* No support for enable_vmware_backdoor */
2624 clr_exception_intercept(svm, GP_VECTOR);
2625
2626 /* Can't intercept XSETBV, HV can't modify XCR0 directly */
2627 svm_clr_intercept(svm, INTERCEPT_XSETBV);
2628
2629 /* Clear intercepts on selected MSRs */
2630 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
2631 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
2632 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
2633 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
2634 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
2635 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
2636}
2637
Sean Christopherson9ebe5302021-09-20 17:03:02 -07002638void sev_es_vcpu_reset(struct vcpu_svm *svm)
Tom Lendacky376c6d22020-12-10 11:10:06 -06002639{
2640 /*
Sean Christopherson9ebe5302021-09-20 17:03:02 -07002641 * Set the GHCB MSR value as per the GHCB specification when emulating
2642 * vCPU RESET for an SEV-ES guest.
Tom Lendacky376c6d22020-12-10 11:10:06 -06002643 */
2644 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2645 GHCB_VERSION_MIN,
2646 sev_enc_bit));
2647}
Tom Lendacky86137772020-12-10 11:10:07 -06002648
Michael Rotha7fc06d2021-02-02 13:01:26 -06002649void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
Tom Lendacky86137772020-12-10 11:10:07 -06002650{
2651 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2652 struct vmcb_save_area *hostsa;
Tom Lendacky86137772020-12-10 11:10:07 -06002653
2654 /*
2655	 * For an SEV-ES guest, hardware will restore the host state on VMEXIT,
2656	 * one step of which is to perform a VMLOAD. Since hardware does not
2657	 * perform a VMSAVE on VMRUN, the host save area must be updated.
2658 */
Sean Christopherson35a78312020-12-30 16:27:00 -08002659 vmsave(__sme_page_pa(sd->save_area));
Tom Lendacky86137772020-12-10 11:10:07 -06002660
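	/*
	 * The additional SEV-ES host save state (XCR0, XSS and PKRU below)
	 * follows the legacy save state in the per-CPU save area page; per
	 * the APM it starts at offset 0x400, hence the pointer arithmetic.
	 */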
Tom Lendacky86137772020-12-10 11:10:07 -06002661 /* XCR0 is restored on VMEXIT, save the current host value */
2662 hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
2663 hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
2664
Ingo Molnard9f6e122021-03-18 15:28:01 +01002665 /* PKRU is restored on VMEXIT, save the current host value */
Tom Lendacky86137772020-12-10 11:10:07 -06002666 hostsa->pkru = read_pkru();
2667
2668	/* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
2669 hostsa->xss = host_xss;
2670}
2671
Tom Lendacky647daca2021-01-04 14:20:01 -06002672void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
2673{
2674 struct vcpu_svm *svm = to_svm(vcpu);
2675
2676 /* First SIPI: Use the values as initially set by the VMM */
2677 if (!svm->received_first_sipi) {
2678 svm->received_first_sipi = true;
2679 return;
2680 }
2681
2682 /*
2683 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
2684 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
2685 * non-zero value.
2686 */
Tom Lendackya3ba26e2021-04-09 09:38:42 -05002687 if (!svm->ghcb)
2688 return;
2689
Tom Lendacky647daca2021-01-04 14:20:01 -06002690 ghcb_set_sw_exit_info_2(svm->ghcb, 1);
2691}