// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/fpu/api.h>

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
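
/*
 * The bit-fields above mirror the ACE control word layout from VIA's
 * PadLock documentation: "rounds" is the AES round count, "keygen"
 * selects a software-supplied key schedule, "encdec" selects
 * decryption and "ksize" encodes the key length (0/1/2 for
 * 128/192/256 bits), as aes_set_key() below fills them in.
 */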

/* Whenever you make changes to the following structure, *make sure*
 * that E, d_data and cword stay aligned on 16-byte boundaries, and
 * that the hardware can access 16 * 16 bytes of E and d_data (only
 * the first 15 * 16 bytes matter, but the HW reads more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};
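
/*
 * D points at the decryption key schedule: at E itself when the
 * hardware expands the key on the fly (both directions then get the
 * plain key), or at d_data when aes_set_key() below expands the
 * schedule in software.
 */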

static DEFINE_PER_CPU(struct cword *, paes_last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	   as it's possible that the capability will be
	   added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

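/*
 * A note on the helpers below: the crypto API only guarantees
 * crypto_tfm_ctx_alignment() for the context pointer, so when that is
 * weaker than PADLOCK_ALIGNMENT the pointer is rounded up by hand to
 * keep E, d_data and cword on 16-byte boundaries.
 */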
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm)
{
	return aes_ctx_common(crypto_skcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (aes_expandkey(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
			per_cpu(paes_last_cword, cpu) = NULL;

	return 0;
}
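
/*
 * For reference: with the arithmetic above, 128/192/256-bit keys yield
 * rounds = 10/12/14 and ksize = 0/1/2, the standard AES round counts.
 */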

static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
				unsigned int key_len)
{
	return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}
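
/*
 * A note on the pair above: the PadLock engine caches the expanded key
 * and control word internally and, per VIA's documentation, a write to
 * EFLAGS (here "pushf; popf") tells it to reload them. Tracking the
 * last control word used per CPU lets us skip that reload when the
 * same tfm keeps the engine busy.
 */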

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when CR0.TS is '1'.  Fortunately,
 * the kernel doesn't use CR0.TS.
 */

static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}
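
/*
 * The .byte sequences hand-encode "rep xcryptecb" (0xf3 0x0f 0xa7 0xc8)
 * and "rep xcryptcbc" (0xf3 0x0f 0xa7 0xd0) for assemblers that lack
 * the PadLock mnemonics. The constraints mirror the instructions'
 * fixed register operands: SI/DI for source/destination, DX for the
 * control word, BX for the key, CX for the block count and AX for
 * the IV.
 */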

static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}
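
/*
 * Sizing note (an assumption spelled out): the bounce buffers above
 * hold at most MAX_*_FETCH_BLOCKS - 1 blocks, which is the largest
 * count these copy paths are ever called with; the engine's overfetch
 * beyond the copied data is covered by the alignment slack and the
 * 16 bytes of stack assumed to be in use.
 */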

static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}

static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	count -= initial;

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}
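
/*
 * The split above first runs the remainder (count modulo the
 * power-of-two fetch size) while plenty of input still lies ahead,
 * then lets the bulk "rep xcrypt" run on an exact multiple of the
 * fetch size, so the engine's prefetch never reads past the end of
 * the caller's buffer. The CBC variant below does the same.
 */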

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	count -= initial;

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-padlock",
	.cra_priority		= PADLOCK_CRA_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= padlock_aes_encrypt,
			.cia_decrypt		= padlock_aes_decrypt,
		}
	}
};
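
/*
 * A minimal, hypothetical sketch of reaching this single-block cipher
 * through the crypto API (not part of this driver; error handling
 * trimmed):
 *
 *	struct crypto_cipher *cip = crypto_alloc_cipher("aes", 0, 0);
 *
 *	crypto_cipher_setkey(cip, key, AES_KEYSIZE_128);
 *	crypto_cipher_encrypt_one(cip, dst, src);
 *	crypto_free_cipher(cip);
 *
 * With this module loaded, "aes" resolves to "aes-padlock" thanks to
 * its elevated cra_priority.
 */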

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}
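
/*
 * The loop above is the standard skcipher-walk idiom: each iteration
 * maps a span of the request, whole blocks are pushed through the
 * engine, and the sub-block remainder (nbytes masked to the block
 * size) is handed back to skcipher_walk_done() as still-to-do. The
 * decrypt and CBC variants below follow the same shape.
 */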

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.decrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-padlock",
	.base.cra_priority	= PADLOCK_COMPOSITE_PRIORITY,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aes_ctx),
	.base.cra_alignmask	= PADLOCK_ALIGNMENT - 1,
	.base.cra_module	= THIS_MODULE,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= aes_set_key_skcipher,
	.encrypt		= ecb_aes_encrypt,
	.decrypt		= ecb_aes_decrypt,
};

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-padlock",
	.base.cra_priority	= PADLOCK_COMPOSITE_PRIORITY,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aes_ctx),
	.base.cra_alignmask	= PADLOCK_ALIGNMENT - 1,
	.base.cra_module	= THIS_MODULE,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aes_set_key_skcipher,
	.encrypt		= cbc_aes_encrypt,
	.decrypt		= cbc_aes_decrypt,
};
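
/*
 * A minimal, hypothetical sketch of driving the CBC skcipher from
 * other kernel code (error handling trimmed; key, iv, sg and nbytes
 * are the caller's; this driver completes synchronously):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
 *	skcipher_request_set_callback(req, 0, NULL, NULL);
 *	skcipher_request_set_crypt(req, sg, sg, nbytes, iv);
 *	err = crypto_skcipher_encrypt(req);
 *
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */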

static const struct x86_cpu_id padlock_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);

static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!x86_match_cpu(padlock_cpu_id))
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)) != 0)
		goto aes_err;

	if ((ret = crypto_register_skcipher(&ecb_aes_alg)) != 0)
		goto ecb_aes_err;

	if ((ret = crypto_register_skcipher(&cbc_aes_alg)) != 0)
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

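	/*
	 * VIA Nano (family 6, model 15) stepping 2 has a prefetch
	 * erratum: xcrypt can fetch more input blocks than it
	 * processes, so widen the assumed fetch size to keep the
	 * overfetch handling above safe.
	 */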
	if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_skcipher(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_skcipher(&cbc_aes_alg);
	crypto_unregister_skcipher(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS_CRYPTO("aes");