// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/fpu/api.h>

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
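
/*
 * As aes_set_key() below fills it in, and as the "d" asm constraints
 * further down suggest, this word is fetched by the xcrypt instructions
 * through EDX: rounds holds the AES round count (10/12/14), ksize
 * encodes the key length (0/1/2 for 128/192/256 bits), encdec = 1
 * selects decryption, and keygen = 1 marks a software-expanded key
 * schedule; algo and interm are left at zero here.
 */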

/*
 * Whenever making any changes to the following structure, *make sure*
 * that E, d_data and cword stay aligned on 16-byte boundaries and that
 * the hardware can access 16 * 16 bytes of E and d_data (only the first
 * 15 * 16 bytes matter, but the hardware reads more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

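/*
 * Per-CPU pointer to the control word most recently used on that CPU.
 * padlock_reset_key() compares against it to decide whether the hardware
 * must be forced to reload its key material, and aes_set_key() clears
 * stale entries so a reused context is never mistaken for a loaded one.
 */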
static DEFINE_PER_CPU(struct cword *, paes_last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	   as it's possible that the capability will be
	   added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

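/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for context
 * memory; if that is already at least PADLOCK_ALIGNMENT the pointer is
 * used as-is, otherwise it is rounded up by hand to the 16-byte boundary
 * the engine requires.
 */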
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm)
{
	return aes_ctx_common(crypto_skcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8)
		return -EINVAL;

	/*
	 * If the hardware is capable of generating the extended key
	 * itself, we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;
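	/* For example: a 128-bit key gives rounds = 10 and ksize = 0; a
	 * 256-bit key gives rounds = 10 + 16/4 = 14 and ksize = 2. */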
137
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138 /* Don't generate extended keys if the hardware can do it. */
139 if (aes_hw_extkey_available(key_len))
Herbert Xu420a4b22008-08-31 15:58:45 +1000140 goto ok;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141
Herbert Xu6789b2d2005-07-06 13:52:27 -0700142 ctx->D = ctx->d_data;
143 ctx->cword.encrypt.keygen = 1;
144 ctx->cword.decrypt.keygen = 1;
145
Eric Biggers674f3682019-12-30 21:19:36 -0600146 if (aes_expandkey(&gen_aes, in_key, key_len))
Sebastian Siewior7dc748e2008-04-01 21:24:50 +0800147 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148
Sebastian Siewior7dc748e2008-04-01 21:24:50 +0800149 memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
150 memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
Herbert Xu420a4b22008-08-31 15:58:45 +1000151
152ok:
153 for_each_online_cpu(cpu)
Tejun Heo390dfd92009-10-29 22:34:14 +0900154 if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
155 &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
156 per_cpu(paes_last_cword, cpu) = NULL;
Herbert Xu420a4b22008-08-31 15:58:45 +1000157
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158 return 0;
159}
160
Eric Biggers713b2e72019-10-12 21:17:41 -0700161static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
162 unsigned int key_len)
163{
164 return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
165}
166
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167/* ====== Encryption/decryption routines ====== */
168
Herbert Xu28e8c3a2005-07-06 13:52:43 -0700169/* These are the real call to PadLock. */
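/*
 * PadLock caches its key material internally; per VIA's documentation a
 * write to EFLAGS forces the next xcrypt to reload it.  paes_last_cword
 * tracks the control word last used on each CPU, so the (costly) forced
 * reload happens only when a different key/cword is about to be used.
 */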
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when CR0.TS is '1'.  Fortunately,
 * the kernel doesn't use CR0.TS.
 */

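/*
 * The xcrypt instructions are emitted as raw opcode bytes (0xf3 0x0f
 * 0xa7 /r is REP XCRYPT*), so no particular assembler support is needed.
 * As the constraints below spell out, ESI holds the source, EDI the
 * destination, EDX the control word, EBX the key and ECX the block
 * count; CBC additionally passes the IV in EAX, which the hardware
 * advances so the returned pointer addresses the IV for the next chunk.
 */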
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}

static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}

static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

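	/*
	 * Peel off the odd count % ecb_fetch_blocks blocks first: the
	 * count >= ecb_fetch_blocks check above guarantees the buffer can
	 * absorb the hardware's over-fetch, and the remaining count becomes
	 * a multiple of the fetch size.  padlock_xcrypt_cbc() below follows
	 * the same pattern.
	 */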
	count -= initial;

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	count -= initial;

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	padlock_aes_encrypt,
			.cia_decrypt		=	padlock_aes_decrypt,
		}
	}
};

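/*
 * The skcipher walk maps each scatterlist segment in turn; every pass
 * handles the whole blocks in that segment and hands any partial tail
 * back to skcipher_walk_done() to carry into the next iteration.
 */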
static int ecb_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.decrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		=	"ecb(aes)",
	.base.cra_driver_name	=	"ecb-aes-padlock",
	.base.cra_priority	=	PADLOCK_COMPOSITE_PRIORITY,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct aes_ctx),
	.base.cra_alignmask	=	PADLOCK_ALIGNMENT - 1,
	.base.cra_module	=	THIS_MODULE,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.setkey			=	aes_set_key_skcipher,
	.encrypt		=	ecb_aes_encrypt,
	.decrypt		=	ecb_aes_decrypt,
};

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		=	"cbc(aes)",
	.base.cra_driver_name	=	"cbc-aes-padlock",
	.base.cra_priority	=	PADLOCK_COMPOSITE_PRIORITY,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct aes_ctx),
	.base.cra_alignmask	=	PADLOCK_ALIGNMENT - 1,
	.base.cra_module	=	THIS_MODULE,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	aes_set_key_skcipher,
	.encrypt		=	cbc_aes_encrypt,
	.decrypt		=	cbc_aes_decrypt,
};

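/*
 * Matching on the XCRYPT CPUID feature bit gives the module an x86cpu
 * modalias, so udev can auto-load it on CPUs that have the engine.
 */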
static const struct x86_cpu_id padlock_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);

static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!x86_match_cpu(padlock_cpu_id))
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)) != 0)
		goto aes_err;

	if ((ret = crypto_register_skcipher(&ecb_aes_alg)) != 0)
		goto ecb_aes_err;

	if ((ret = crypto_register_skcipher(&cbc_aes_alg)) != 0)
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

	if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_skcipher(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_skcipher(&cbc_aes_alg);
	crypto_unregister_skcipher(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS_CRYPTO("aes");