/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>

#include "cesa.h"

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

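/*
 * Iterator used by the TDMA path to walk the source and destination
 * scatterlists one crypto operation at a time; each operation is presumably
 * bounded by the engine's internal SRAM payload size, as in the standard
 * path below.
 */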
struct mv_cesa_ablkcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
				 struct ablkcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->req.dma);
}

static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_cleanup(req);
}

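/*
 * Standard (CPU-driven) mode: copy the next chunk of input data into the
 * engine SRAM, update the operation descriptor with the chunk length and
 * kick the accelerator. The full op (including the key/IV context) only
 * needs to be written to SRAM once; subsequent chunks rewrite just the
 * descriptor part.
 */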
static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	size_t len = min_t(size_t, req->nbytes - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

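/*
 * Called once the accelerator has finished a chunk: copy the result out of
 * SRAM into the destination scatterlist and return -EINPROGRESS while more
 * data remains to be processed.
 */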
static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
					  u32 status)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->nbytes)
		return -EINPROGRESS;

	return 0;
}

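/*
 * Completion handler shared by the TDMA and standard paths. On success the
 * updated IV is copied back from SRAM into the request so that chained
 * modes (CBC) can be continued by the caller.
 */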
static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
				      u32 status)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	int ret;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_dma_process(&creq->req.dma, status);
	else
		ret = mv_cesa_ablkcipher_std_process(ablkreq, status);

	if (ret)
		return ret;

	memcpy(ablkreq->info, engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
	       crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)));

	return 0;
}

static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->req.dma);
	else
		mv_cesa_ablkcipher_std_step(ablkreq);
}

static inline void
mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_tdma_req *dreq = &creq->req.dma;

	mv_cesa_dma_prepare(dreq, dreq->base.engine);
}

static inline void
mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;

	sreq->size = 0;
	sreq->offset = 0;
	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy(engine->sram, &sreq->op, sizeof(sreq->op));
}

static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
					      struct mv_cesa_engine *engine)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	creq->req.base.engine = engine;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_prepare(ablkreq);
	else
		mv_cesa_ablkcipher_std_prepare(ablkreq);
}

static inline void
mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);

	mv_cesa_ablkcipher_cleanup(ablkreq);
}

static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
	.step = mv_cesa_ablkcipher_step,
	.process = mv_cesa_ablkcipher_process,
	.prepare = mv_cesa_ablkcipher_prepare,
	.cleanup = mv_cesa_ablkcipher_req_cleanup,
};

static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ablkcipher_req_ops;

	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);

	return 0;
}

static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

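	/*
	 * crypto_aes_expand_key() stores the last round key in
	 * key_dec[0..3]. The engine derives the rest of the decryption
	 * schedule itself, but for 192/256-bit keys it also needs the
	 * expanded-key words that precede the last round key, so append
	 * them here (remaining is 0 for 128-bit keys).
	 */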
	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

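/*
 * Build the TDMA descriptor chain for this request: for each chunk, add a
 * copy of the op descriptor, the input data transfers, a dummy descriptor
 * that launches the crypto operation, and finally the output data
 * transfers. The source and destination scatterlists are DMA-mapped here
 * and unmapped on error or at cleanup time.
 */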
static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
					   const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_tdma_req *dreq = &creq->req.dma;
	struct mv_cesa_ablkcipher_dma_iter iter;
	struct mv_cesa_tdma_chain chain;
	bool skip_ctx = false;
	int ret;

	dreq->base.type = CESA_DMA_REQ;
	dreq->chain.first = NULL;
	dreq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&chain);
	mv_cesa_ablkcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));

	dreq->chain = chain;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(dreq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
				const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;

	sreq->base.type = CESA_STD_REQ;
	sreq->op = *op_templ;
	sreq->skip_ctx = false;

	return 0;
}

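/*
 * Common request initialization: reject lengths that are not a multiple of
 * the block size, then pick the TDMA path when the engine supports it and
 * fall back to the CPU-driven path otherwise.
 */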
static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
				       struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->nbytes, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	/* TODO: add a threshold for DMA usage */
	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);

	return ret;
}

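/*
 * Fill the op template with the AES key material (decryption uses the key
 * prepared in mv_cesa_aes_setkey()) and the key-length configuration, then
 * initialize the request and queue it on the engine.
 */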
static int mv_cesa_aes_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int ret, i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	ret = mv_cesa_queue_req(&req->base);
	if (ret && ret != -EINPROGRESS)
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}

static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

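/*
 * Rough usage sketch (for illustration only; error handling omitted, and
 * "key", "src", "dst", "nbytes" and "done_cb" are assumed to be provided
 * by the caller):
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, 0, done_cb, NULL);
 *	ablkcipher_request_set_crypt(req, src, dst, nbytes, NULL);
 *	crypto_ablkcipher_encrypt(req);
 *
 * Since the alg is CRYPTO_ALG_ASYNC, encrypt may return -EINPROGRESS and
 * complete through the callback.
 */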
struct crypto_alg mv_cesa_ecb_aes_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_ecb_aes_encrypt,
			.decrypt = mv_cesa_ecb_aes_decrypt,
		},
	},
};

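/*
 * CBC differs from ECB only in the chaining configuration and in the IV,
 * which is copied from the request into the op context so the engine can
 * chain blocks; the updated IV is copied back in
 * mv_cesa_ablkcipher_process().
 */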
static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_aes_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_cbc_aes_encrypt,
			.decrypt = mv_cesa_cbc_aes_decrypt,
		},
	},
};