// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/fpu/api.h>

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));

/* Whenever making any changes to the following
 * structure, *make sure* to keep E, d_data
 * and cword aligned on 16-byte boundaries, and
 * that the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter, but the hardware reads
 * more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

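/*
 * Per-CPU pointer to the control word most recently loaded into the
 * PadLock unit on that CPU; used to skip the (relatively costly) key
 * reload when the same control word is used again on the same CPU.
 */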
static DEFINE_PER_CPU(struct cword *, paes_last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	   as it's possible that the capability will be
	   added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

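/*
 * The context pointer is aligned by hand: the crypto API only guarantees
 * crypto_tfm_ctx_alignment(), while the xcrypt instructions want their
 * operands on PADLOCK_ALIGNMENT (16-byte) boundaries.
 */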
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm)
{
	return aes_ctx_common(crypto_skcipher_ctx(tfm));
}

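/*
 * Set the AES key: build the encryption/decryption control words and
 * either let the hardware derive the round keys itself (128-bit keys)
 * or expand them in software via aes_expandkey() for longer keys.
 */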
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8)
		return -EINVAL;

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (aes_expandkey(&gen_aes, in_key, key_len))
		return -EINVAL;

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
			per_cpu(paes_last_cword, cpu) = NULL;

	return 0;
}

static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
				unsigned int key_len)
{
	return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
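/*
 * The xcrypt unit caches the key/control word it last loaded. Before using
 * a control word that differs from the per-CPU cached one, we execute
 * pushf/popf; writing EFLAGS makes the next xcrypt reload its key material
 * from memory instead of reusing the stale cached copy.
 */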
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when CR0.TS is '1'.  Fortunately,
 * the kernel doesn't use CR0.TS.
 */

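/*
 * The ".byte 0xf3,0x0f,0xa7,..." sequences below are the raw encodings of
 * REP XCRYPTECB and REP XCRYPTCBC (spelled out so no assembler support is
 * needed). Operands follow the PadLock convention visible in the asm
 * constraints: ESI = source, EDI = destination, EBX = key, EDX = control
 * word, ECX = block count, and EAX = IV for the CBC form.
 */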
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}

static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}

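/*
 * Multi-block helpers: the leftover blocks (count modulo the fetch size)
 * are processed first, so the final rep xcrypt operates on a whole
 * multiple of the fetch size and an errata-affected prefetch does not
 * read past the end of the source buffer.
 */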
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	count -= initial;

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	count -= initial;

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

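/* Single-block "aes" cipher; the rep-prefixed ECB/CBC skciphers follow. */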
static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	padlock_aes_encrypt,
			.cia_decrypt		=	padlock_aes_decrypt,
		}
	}
};

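/*
 * skcipher handlers: walk the request, hand whole AES blocks to the
 * xcrypt unit, and return any partial tail to the walk for the next pass.
 */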
static int ecb_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.decrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		=	"ecb(aes)",
	.base.cra_driver_name	=	"ecb-aes-padlock",
	.base.cra_priority	=	PADLOCK_COMPOSITE_PRIORITY,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct aes_ctx),
	.base.cra_alignmask	=	PADLOCK_ALIGNMENT - 1,
	.base.cra_module	=	THIS_MODULE,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.setkey			=	aes_set_key_skcipher,
	.encrypt		=	ecb_aes_encrypt,
	.decrypt		=	ecb_aes_decrypt,
};

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		=	"cbc(aes)",
	.base.cra_driver_name	=	"cbc-aes-padlock",
	.base.cra_priority	=	PADLOCK_COMPOSITE_PRIORITY,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct aes_ctx),
	.base.cra_alignmask	=	PADLOCK_ALIGNMENT - 1,
	.base.cra_module	=	THIS_MODULE,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	aes_set_key_skcipher,
	.encrypt		=	cbc_aes_encrypt,
	.decrypt		=	cbc_aes_decrypt,
};

static const struct x86_cpu_id padlock_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_XCRYPT, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);

static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!x86_match_cpu(padlock_cpu_id))
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)) != 0)
		goto aes_err;

	if ((ret = crypto_register_skcipher(&ecb_aes_alg)) != 0)
		goto ecb_aes_err;

	if ((ret = crypto_register_skcipher(&cbc_aes_alg)) != 0)
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

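	/*
	 * VIA Nano stepping 2 prefetches beyond the data it was asked to
	 * process; bump the assumed fetch sizes so the bounce-buffer paths
	 * above account for everything the hardware may actually read.
	 */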
	if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_skcipher(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_skcipher(&cbc_aes_alg);
	crypto_unregister_skcipher(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS_CRYPTO("aes");