// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

#include "cipher.h"
#include "common.h"
#include "core.h"
#include "regs-v5.h"
#include "sha.h"

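/*
 * MMIO helpers: each crypto engine register is a 32-bit word at an offset
 * from the mapped device base. The array variants program or clear runs of
 * consecutive registers (keys, IVs, byte counters).
 */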
static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
	return readl(qce->base + offset);
}

static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
{
	writel(val, qce->base + offset);
}

static inline void qce_write_array(struct qce_device *qce, u32 offset,
				   const u32 *val, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		qce_write(qce, offset + i * sizeof(u32), val[i]);
}

static inline void
qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		qce_write(qce, offset + i * sizeof(u32), 0);
}

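/*
 * Build a REG_CONFIG value: encode the request size in beats, mask all
 * engine interrupt sources (completion is observed via the DMA path and
 * REG_STATUS rather than the engine IRQ), select the pipe pair assigned
 * to this driver, and optionally enable little-endian data mode.
 */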
static u32 qce_config_reg(struct qce_device *qce, int little)
{
	u32 beats = (qce->burst_size >> 3) - 1;
	u32 pipe_pair = qce->pipe_pair_id;
	u32 config;

	config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
	config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
		  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
	config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
	config &= ~HIGH_SPD_EN_N_SHIFT;

	if (little)
		config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);

	return config;
}

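/*
 * Copy @len bytes from @src into @dst as big-endian 32-bit words. Keys,
 * IVs and digests are handed to the engine in this layout; @len is
 * expected to be a multiple of 4, any tail bytes are ignored.
 */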
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
{
	__be32 *d = dst;
	const u8 *s = src;
	unsigned int n;

	n = len / sizeof(u32);
	for (; n > 0; n--) {
		*d = cpu_to_be32p((const __u32 *) s);
		s += sizeof(__u32);
		d++;
	}
}

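/*
 * Put the engine into a known state before programming a request: clear
 * the status register and load a big-endian configuration. The request
 * setup routines below switch to little-endian data mode just before
 * starting the operation.
 */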
static void qce_setup_config(struct qce_device *qce)
{
	u32 config;

	/* get config value for big-endian mode */
	config = qce_config_reg(qce, 0);

	/* clear status and program the configuration */
	qce_write(qce, REG_STATUS, 0);
	qce_write(qce, REG_CONFIG, config);
}

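/*
 * Start the programmed operation; the RESULTS_DUMP bit also asks the
 * engine to write its completion status to the result dump region (see
 * the comment in qce_check_status() below).
 */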
static inline void qce_crypto_go(struct qce_device *qce)
{
	qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}

#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
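/*
 * Build the REG_AUTH_SEG_CFG value for a hash/MAC request from the
 * algorithm flags: algorithm (SHA vs. AES), key size, digest size, mode
 * (plain hash, HMAC, CCM or CMAC) and first/last block handling.
 */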
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
	u32 cfg = 0;

	if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
		cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
	else
		cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;

	if (IS_CCM(flags) || IS_CMAC(flags)) {
		if (key_size == AES_KEYSIZE_128)
			cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
		else if (key_size == AES_KEYSIZE_256)
			cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
	}

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
		cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
	else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
		cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
	else if (IS_CMAC(flags))
		cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;

	if (IS_SHA1(flags) || IS_SHA256(flags))
		cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
	else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
		 IS_CBC(flags) || IS_CTR(flags))
		cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
	else if (IS_AES(flags) && IS_CCM(flags))
		cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
	else if (IS_AES(flags) && IS_CMAC(flags))
		cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;

	if (IS_SHA(flags) || IS_SHA_HMAC(flags))
		cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;

	if (IS_CCM(flags))
		cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;

	if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
	    IS_CMAC(flags))
		cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);

	return cfg;
}

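/*
 * Program one hash/MAC pass: write the HMAC/CMAC key if needed, seed the
 * digest registers (initial digest on the first block, the running
 * intermediate digest otherwise), restore the byte counters, and set the
 * segment sizes before starting the engine.
 */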
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
				u32 totallen, u32 offset)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
	__be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
	__be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
	u32 auth_cfg = 0, config;
	unsigned int iv_words;

	/* if not the last request, the size must be a multiple of the block size */
	if (!rctx->last_blk && req->nbytes % blocksize)
		return -EINVAL;

	qce_setup_config(qce);

	if (IS_CMAC(rctx->flags)) {
		qce_write(qce, REG_AUTH_SEG_CFG, 0);
		qce_write(qce, REG_ENCR_SEG_CFG, 0);
		qce_write(qce, REG_ENCR_SEG_SIZE, 0);
		qce_clear_array(qce, REG_AUTH_IV0, 16);
		qce_clear_array(qce, REG_AUTH_KEY0, 16);
		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);

		auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
	}

	if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
		u32 authkey_words = rctx->authklen / sizeof(u32);

		qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
		qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
				authkey_words);
	}

	if (IS_CMAC(rctx->flags))
		goto go_proc;

	if (rctx->first_blk)
		memcpy(auth, rctx->digest, digestsize);
	else
		qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);

	iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
	qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);

	if (rctx->first_blk)
		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
	else
		qce_write_array(qce, REG_AUTH_BYTECNT0,
				(u32 *)rctx->byte_count, 2);

	auth_cfg = qce_auth_cfg(rctx->flags, 0);

	if (rctx->last_blk)
		auth_cfg |= BIT(AUTH_LAST_SHIFT);
	else
		auth_cfg &= ~BIT(AUTH_LAST_SHIFT);

	if (rctx->first_blk)
		auth_cfg |= BIT(AUTH_FIRST_SHIFT);
	else
		auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);

go_proc:
	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
	qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
	qce_write(qce, REG_AUTH_SEG_START, 0);
	qce_write(qce, REG_ENCR_SEG_CFG, 0);
	qce_write(qce, REG_SEG_SIZE, req->nbytes);

	/* switch the engine to little-endian data mode */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	qce_crypto_go(qce);

	return 0;
}
#endif

#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
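/*
 * Build the REG_ENCR_SEG_CFG value for a cipher request from the
 * algorithm flags: cipher algorithm, key size and block mode. Returns ~0
 * for an unrecognized mode.
 */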
static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
{
	u32 cfg = 0;

	if (IS_AES(flags)) {
		if (aes_key_size == AES_KEYSIZE_128)
			cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
		else if (aes_key_size == AES_KEYSIZE_256)
			cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
	}

	if (IS_AES(flags))
		cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
	else if (IS_DES(flags) || IS_3DES(flags))
		cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;

	if (IS_DES(flags))
		cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;

	if (IS_3DES(flags))
		cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;

	switch (flags & QCE_MODE_MASK) {
	case QCE_MODE_ECB:
		cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CBC:
		cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CTR:
		cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_XTS:
		cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CCM:
		cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
		cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
		break;
	default:
		return ~0;
	}

	return cfg;
}

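/*
 * Convert an XTS tweak (IV) into the layout the engine expects: the IV
 * bytes are reversed into the tail of a zero-padded 16-byte buffer and
 * then written out as big-endian words.
 */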
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
	u8 swap[QCE_AES_IV_LENGTH];
	u32 i, j;

	if (ivsize > QCE_AES_IV_LENGTH)
		return;

	memset(swap, 0, QCE_AES_IV_LENGTH);

	for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
	     i < QCE_AES_IV_LENGTH; i++, j--)
		swap[i] = src[j];

	qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
}

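/*
 * Program the XTS tweak key (the second half of the combined key) into
 * the dedicated XTS key registers, along with the data-unit size.
 */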
static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
		       unsigned int enckeylen, unsigned int cryptlen)
{
	u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
	unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
	unsigned int xtsdusize;

	qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
			       enckeylen / 2);
	qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);

	/* XTS data unit size is 512 bytes, or the request length if shorter */
	xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
	qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
}

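/*
 * Program one skcipher request: load the cipher key (plus the tweak key
 * for XTS), the IV or initial counter for non-ECB modes, the direction
 * bit and the segment layout, then start the engine.
 */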
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
				   u32 totallen, u32 offset)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
	__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
	unsigned int enckey_words, enciv_words;
	unsigned int keylen;
	u32 encr_cfg = 0, auth_cfg = 0, config;
	unsigned int ivsize = rctx->ivsize;
	unsigned long flags = rctx->flags;

	qce_setup_config(qce);

	if (IS_XTS(flags))
		keylen = ctx->enc_keylen / 2;
	else
		keylen = ctx->enc_keylen;

	qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
	enckey_words = keylen / sizeof(u32);

	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);

	encr_cfg = qce_encr_cfg(flags, keylen);

	if (IS_DES(flags)) {
		enciv_words = 2;
		enckey_words = 2;
	} else if (IS_3DES(flags)) {
		enciv_words = 2;
		enckey_words = 6;
	} else if (IS_AES(flags)) {
		if (IS_XTS(flags))
			qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
				   rctx->cryptlen);
		enciv_words = 4;
	} else {
		return -EINVAL;
	}

	qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);

	if (!IS_ECB(flags)) {
		if (IS_XTS(flags))
			qce_xts_swapiv(enciv, rctx->iv, ivsize);
		else
			qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);

		qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
	}

	if (IS_ENCRYPT(flags))
		encr_cfg |= BIT(ENCODE_SHIFT);

	qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
	qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
	qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);

	if (IS_CTR(flags)) {
		qce_write(qce, REG_CNTR_MASK, ~0);
		qce_write(qce, REG_CNTR_MASK0, ~0);
		qce_write(qce, REG_CNTR_MASK1, ~0);
		qce_write(qce, REG_CNTR_MASK2, ~0);
	}

	qce_write(qce, REG_SEG_SIZE, totallen);

	/* switch the engine to little-endian data mode */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	qce_crypto_go(qce);

	return 0;
}
#endif

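/*
 * Common entry point for the cipher and hash paths: dispatch to the
 * register setup routine matching the crypto API request type.
 */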
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
	      u32 offset)
{
	switch (type) {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return qce_setup_regs_skcipher(async_req, totallen, offset);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	case CRYPTO_ALG_TYPE_AHASH:
		return qce_setup_regs_ahash(async_req, totallen, offset);
#endif
	default:
		return -EINVAL;
	}
}

#define STATUS_ERRORS	\
		(BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))

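/*
 * Read REG_STATUS after a completed transfer and translate it into an
 * error code: -ENXIO if an error bit is set or OPERATION_DONE is not.
 */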
int qce_check_status(struct qce_device *qce, u32 *status)
{
	int ret = 0;

	*status = qce_read(qce, REG_STATUS);

	/*
	 * Don't use the result dump status here: the operation may not be
	 * complete. Instead, use the status we just read from the device. If
	 * result_status from the result dump is ever needed, it must be byte
	 * swapped, since the device is set to little-endian mode.
	 */
	if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
		ret = -ENXIO;

	return ret;
}

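/*
 * Decode the hardware revision (major/minor/step) from REG_VERSION.
 */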
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
{
	u32 val;

	val = qce_read(qce, REG_VERSION);
	*major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
	*minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
	*step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
}
437}