/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/linkage.h>

#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/asm-offsets.h>

	.text
	.code32
/*
 * u32 get_sev_encryption_bit(void)
 *
 * 32-bit code, shared between the 32-bit and 64-bit boot paths
 * (hence it must not touch any 64-bit-only state).
 *
 * Returns in %eax the page-table bit position used to mark encrypted
 * pages (the SEV "C-bit") when SEV is active, or 0 when SEV is not
 * available/active. %ebx/%ecx/%edx are preserved.
 */
SYM_FUNC_START(get_sev_encryption_bit)
	xor	%eax, %eax		/* Default return: 0 = no SEV */

#ifdef CONFIG_AMD_MEM_ENCRYPT
	push	%ebx			/* CPUID/RDMSR clobber ebx/ecx/edx */
	push	%ecx
	push	%edx

	movl	$0x80000000, %eax	/* CPUID to check the highest leaf */
	cpuid
	cmpl	$0x8000001f, %eax	/* See if 0x8000001f is available */
	jb	.Lno_sev

	/*
	 * Check for the SEV feature:
	 *   CPUID Fn8000_001F[EAX] - Bit 1
	 *   CPUID Fn8000_001F[EBX] - Bits 5:0
	 *     Pagetable bit position used to indicate encryption
	 */
	movl	$0x8000001f, %eax
	cpuid
	bt	$1, %eax		/* Check if SEV is available */
	jnc	.Lno_sev

	movl	$MSR_AMD64_SEV, %ecx	/* Read the SEV MSR */
	rdmsr
	bt	$MSR_AMD64_SEV_ENABLED_BIT, %eax	/* Check if SEV is active */
	jnc	.Lno_sev

	movl	%ebx, %eax
	andl	$0x3f, %eax		/* Return the encryption bit location */
	jmp	.Lsev_exit

.Lno_sev:
	xor	%eax, %eax

.Lsev_exit:
	pop	%edx
	pop	%ecx
	pop	%ebx

#endif	/* CONFIG_AMD_MEM_ENCRYPT */

	RET
SYM_FUNC_END(get_sev_encryption_bit)
/**
 * sev_es_req_cpuid - Request a CPUID value from the Hypervisor using
 *		      the GHCB MSR protocol
 *
 * @%eax:	Register to request (0=EAX, 1=EBX, 2=ECX, 3=EDX)
 * @%edx:	CPUID Function
 *
 * Returns 0 in %eax on success, non-zero on failure
 * %edx returns CPUID value on success
 *
 * Clobbers %ecx. Used by the early #VC handler before a full GHCB
 * page is available, so everything goes through the GHCB MSR.
 */
SYM_CODE_START_LOCAL(sev_es_req_cpuid)
	/*
	 * Build the GHCB MSR protocol request in EDX:EAX:
	 *   EAX bits [31:30] - register being requested
	 *   EAX bits [11: 0] - 0x004, the CPUID-request code
	 *   EDX              - CPUID function number
	 */
	shll	$30, %eax
	orl	$0x00000004, %eax
	movl	$MSR_AMD64_SEV_ES_GHCB, %ecx
	wrmsr
	rep; vmmcall		# VMGEXIT - transfer control to the Hypervisor
	rdmsr			# Read the Hypervisor's response from the GHCB MSR

	/* Check response */
	movl	%eax, %ecx
	andl	$0x3ffff000, %ecx	# Bits [12-29] MBZ
	jnz	2f

	/* Check return code - 0x005 is the CPUID-response code */
	andl	$0xfff, %eax
	cmpl	$5, %eax
	jne	2f

	/* All good - return success */
	xorl	%eax, %eax
1:
	RET
2:
	movl	$-1, %eax	# Failure: malformed or unexpected response
	jmp	1b
SYM_CODE_END(sev_es_req_cpuid)
100
/*
 * 32-bit #VC exception handler used during early boot of an SEV-ES
 * guest, before a GHCB page is set up.
 *
 * Only the CPUID exit reason (SVM_EXIT_CPUID == 0x72) is handled: each
 * of the four result registers is fetched from the Hypervisor via
 * sev_es_req_cpuid() and written into the saved register image on the
 * stack, then execution resumes after the guest's CPUID instruction.
 * Any other exit reason, a failed request, or implausible CPUID
 * results cause a terminate request to the Hypervisor.
 *
 * Stack on entry (pushed by the CPU): error code below the IRET frame.
 * After the four pushes below: 0(%esp)=edx, 4=ecx, 8=ebx, 12=eax,
 * 16=error code.
 */
SYM_CODE_START(startup32_vc_handler)
	pushl	%eax
	pushl	%ebx
	pushl	%ecx
	pushl	%edx

	/* Keep CPUID function in %ebx (%eax held it when #VC was raised) */
	movl	%eax, %ebx

	/* Check if error-code == SVM_EXIT_CPUID */
	cmpl	$0x72, 16(%esp)
	jne	.Lfail

	movl	$0, %eax		# Request CPUID[fn].EAX
	movl	%ebx, %edx		# CPUID fn
	call	sev_es_req_cpuid	# Call helper
	testl	%eax, %eax		# Check return code
	jnz	.Lfail
	movl	%edx, 12(%esp)		# Store result in saved %eax

	movl	$1, %eax		# Request CPUID[fn].EBX
	movl	%ebx, %edx		# CPUID fn
	call	sev_es_req_cpuid	# Call helper
	testl	%eax, %eax		# Check return code
	jnz	.Lfail
	movl	%edx, 8(%esp)		# Store result in saved %ebx

	movl	$2, %eax		# Request CPUID[fn].ECX
	movl	%ebx, %edx		# CPUID fn
	call	sev_es_req_cpuid	# Call helper
	testl	%eax, %eax		# Check return code
	jnz	.Lfail
	movl	%edx, 4(%esp)		# Store result in saved %ecx

	movl	$3, %eax		# Request CPUID[fn].EDX
	movl	%ebx, %edx		# CPUID fn
	call	sev_es_req_cpuid	# Call helper
	testl	%eax, %eax		# Check return code
	jnz	.Lfail
	movl	%edx, 0(%esp)		# Store result in saved %edx

	/*
	 * Sanity check CPUID results from the Hypervisor. See comment in
	 * do_vc_no_ghcb() for more details on why this is necessary.
	 */

	/* Fail if SEV leaf not available in CPUID[0x80000000].EAX */
	cmpl	$0x80000000, %ebx
	jne	.Lcheck_sev
	cmpl	$0x8000001f, 12(%esp)
	jb	.Lfail
	jmp	.Ldone

.Lcheck_sev:
	/* Fail if SEV bit not set in CPUID[0x8000001f].EAX[1] */
	cmpl	$0x8000001f, %ebx
	jne	.Ldone
	btl	$1, 12(%esp)
	jnc	.Lfail

.Ldone:
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax

	/* Remove error code */
	addl	$4, %esp

	/* Jump over CPUID instruction (2-byte opcode 0x0f 0xa2) */
	addl	$2, (%esp)

	iret
.Lfail:
	/* Send terminate request to Hypervisor via the GHCB MSR protocol */
	movl	$0x100, %eax
	xorl	%edx, %edx
	movl	$MSR_AMD64_SEV_ES_GHCB, %ecx
	wrmsr
	rep; vmmcall

	/* If request fails, go to hlt loop */
	hlt
	jmp	.Lfail
SYM_CODE_END(startup32_vc_handler)
	.code64

#include "../../kernel/sev_verify_cbit.S"
/*
 * unsigned long set_sev_encryption_mask(void)
 *
 * 64-bit code. Queries the SEV encryption-bit position and, when SEV
 * is active, records the page-table encryption mask in sme_me_mask and
 * caches the raw MSR_AMD64_SEV value in sev_status.
 *
 * Always returns 0 in %rax.
 */
SYM_FUNC_START(set_sev_encryption_mask)
#ifdef CONFIG_AMD_MEM_ENCRYPT
	push	%rbp
	push	%rdx			/* rdx is clobbered by rdmsr below */

	movq	%rsp, %rbp		/* Save current stack pointer */

	call	get_sev_encryption_bit	/* Get the encryption bit position */
	testl	%eax, %eax
	jz	.Lno_sev_mask		/* 0 => SEV not active, nothing to record */

	bts	%rax, sme_me_mask(%rip)	/* Create the encryption mask */

	/*
	 * Read MSR_AMD64_SEV again and store it to sev_status. Can't do this in
	 * get_sev_encryption_bit() because this function is 32-bit code and
	 * shared between 64-bit and 32-bit boot path.
	 */
	movl	$MSR_AMD64_SEV, %ecx	/* Read the SEV MSR */
	rdmsr

	/* Store MSR value (EDX:EAX combined into 64 bits) in sev_status */
	shlq	$32, %rdx
	orq	%rdx, %rax
	movq	%rax, sev_status(%rip)

.Lno_sev_mask:
	movq	%rbp, %rsp		/* Restore original stack pointer */

	pop	%rdx
	pop	%rbp
#endif

	xor	%rax, %rax		/* Return value is always 0 */
	RET
SYM_FUNC_END(set_sev_encryption_mask)
	.data

#ifdef CONFIG_AMD_MEM_ENCRYPT
	.balign	8
SYM_DATA(sme_me_mask,		.quad 0)	/* Encryption mask: bit set by set_sev_encryption_mask(), 0 if SEV inactive */
SYM_DATA(sev_status,		.quad 0)	/* Raw MSR_AMD64_SEV value cached by set_sev_encryption_mask() */
SYM_DATA(sev_check_data,	.quad 0)	/* NOTE(review): not referenced here; presumably used by the included sev_verify_cbit.S - confirm */
#endif