/*
 * Cryptographic API.
 *
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Some ideas are from the atmel-aes.c driver.
 */

#include <crypto/aes.h>
#include "mtk-platform.h"

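/*
 * Each record owns an order-2 (four page) bounce buffer, used when a
 * request's scatterlist is not block/word aligned; AES_BUF_SIZE rounds
 * that allocation down to a whole number of AES blocks.
 */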
#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))

/* AES command token size */
#define AES_CT_SIZE_ECB		2
#define AES_CT_SIZE_CBC		3
#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)
/* AES-CBC/ECB command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)

/* AES flags */
#define AES_FLAGS_MODE_MSK	0x7
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_ENCRYPT	BIT(2)
#define AES_FLAGS_BUSY		BIT(3)

/*
 * The command token (CT) is a set of hardware instructions that
 * controls the engine's AES processing flow.
 *
 * The transform information (TFM) defines the AES state and
 * contains all keys and initial vectors.
 *
 * The engine requires CT and TFM for:
 * - Command decoding and control of the engine's data path.
 * - Coordinating hardware data fetch and store operations.
 * - Result token construction and output.
 */
struct mtk_aes_ct {
	__le32 cmd[AES_CT_SIZE_CBC];
};

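/*
 * state[] holds the key words followed, in CBC mode, by the IV;
 * mtk_aes_info_map() writes the IV ctx->keylen words past the key.
 */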
struct mtk_aes_tfm {
	__le32 ctrl[2];
	__le32 state[SIZE_IN_WORDS(AES_KEYSIZE_256 + AES_BLOCK_SIZE)];
};

struct mtk_aes_reqctx {
	u64 mode;
};

struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	mtk_aes_fn start;

	struct mtk_aes_ct ct;
	dma_addr_t ct_dma;
	struct mtk_aes_tfm tfm;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx base;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};

static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}

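/* Pick an EIP97 device for this tfm; the choice is cached in the context. */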
static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}

static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}

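/*
 * Check that each segment is word aligned and, except for the tail,
 * covers whole AES blocks. On success, trim the final segment to 'len'
 * and record the trimmed remainder so mtk_aes_restore_sg() can undo it.
 */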
static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}

/* Initialize the command token and transform information, then map both for DMA. */
static int mtk_aes_info_map(struct mtk_cryp *cryp,
			    struct mtk_aes_rec *aes,
			    size_t len)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	ctx->ct.cmd[0] = AES_CMD0 | cpu_to_le32(len);
	ctx->ct.cmd[1] = AES_CMD1;

	if (aes->flags & AES_FLAGS_ENCRYPT)
		ctx->tfm.ctrl[0] = AES_TFM_BASIC_OUT;
	else
		ctx->tfm.ctrl[0] = AES_TFM_BASIC_IN;

	if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128))
		ctx->tfm.ctrl[0] |= AES_TFM_128BITS;
	else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256))
		ctx->tfm.ctrl[0] |= AES_TFM_256BITS;
	else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_192))
		ctx->tfm.ctrl[0] |= AES_TFM_192BITS;

	if (aes->flags & AES_FLAGS_CBC) {
		const u32 *iv = (const u32 *)req->info;
		u32 *iv_state = ctx->tfm.state + ctx->keylen;
		int i;

		ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen +
				    SIZE_IN_WORDS(AES_BLOCK_SIZE));
		ctx->tfm.ctrl[1] = AES_TFM_CBC | AES_TFM_FULL_IV;

		for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
			iv_state[i] = cpu_to_le32(iv[i]);

		ctx->ct.cmd[2] = AES_CMD2;
		ctx->ct_size = AES_CT_SIZE_CBC;
	} else if (aes->flags & AES_FLAGS_ECB) {
		ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen);
		ctx->tfm.ctrl[1] = AES_TFM_ECB;

		ctx->ct_size = AES_CT_SIZE_ECB;
	}

	ctx->ct_dma = dma_map_single(cryp->dev, &ctx->ct, sizeof(ctx->ct),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		return -EINVAL;

	ctx->tfm_dma = dma_map_single(cryp->dev, &ctx->tfm, sizeof(ctx->tfm),
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->tfm_dma))) {
		/* Undo the command-token mapping; tfm itself never mapped. */
		dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
				 DMA_TO_DEVICE);
		return -EINVAL;
	}

	return 0;
}

/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_base + ring->cmd_pos;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		if (++ring->cmd_pos == MTK_DESC_NUM)
			ring->cmd_pos = 0;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_base + ring->res_pos;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		if (++ring->res_pos == MTK_DESC_NUM)
			ring->res_pos = 0;
	}
	res->hdr |= MTK_DESC_LAST;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}

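/* Undo the segment trimming done by mtk_aes_check_aligned(). */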
static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}

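/*
 * Map source/destination scatterlists for DMA. Misaligned requests are
 * bounced through the record's pre-allocated buffer and padded up to a
 * whole AES block.
 */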
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return -ENOMEM;

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			return -EFAULT;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			return -EFAULT;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg,
				     aes->src.nents, DMA_TO_DEVICE);
			return -EFAULT;
		}
	}

	return mtk_aes_info_map(cryp, aes, len + padlen);
}

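/*
 * Queue a request on a record and, if the record is idle, dequeue the
 * next request and hand it to the context's start routine.
 */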
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}

static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	int err;

	rctx->mode &= AES_FLAGS_MODE_MSK;
	aes->flags = (aes->flags & ~AES_FLAGS_MODE_MSK) | rctx->mode;

	err = mtk_aes_map(cryp, aes, req->src, req->dst, req->nbytes);
	if (err)
		return err;

	return mtk_aes_xmit(cryp, aes);
}

static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
			 DMA_TO_DEVICE);
	dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg,
			     aes->src.nents, DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg,
			     aes->dst.nents, DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg,
			     aes->src.nents, DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst,
				    sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}

static inline void mtk_aes_complete(struct mtk_cryp *cryp,
				    struct mtk_aes_rec *aes)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, 0);

	/* Handle new request */
	mtk_aes_handle_queue(cryp, aes->id, NULL);
}

/* Check the key length and copy the AES key into the transform state buffer. */
static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	const u32 *key_tmp = (const u32 *)key;
	u32 *key_state = ctx->tfm.state;
	int i;

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	for (i = 0; i < ctx->keylen; i++)
		key_state[i] = cpu_to_le32(key_tmp[i]);

	return 0;
}

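/*
 * Dispatch a request: the record id below maps encryption to
 * record/ring 0 and decryption to record/ring 1.
 */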
static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx;
	struct mtk_aes_reqctx *rctx;

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx = ablkcipher_request_ctx(req);
	rctx->mode = mode;

	return mtk_aes_handle_queue(ctx->cryp,
				    !(mode & AES_FLAGS_ENCRYPT), &req->base);
}

static int mtk_ecb_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_ecb_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_cbc_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_cbc_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_start;
	return 0;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 15,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_cbc_encrypt,
		.decrypt	= mtk_cbc_decrypt,
		.ivsize		= AES_BLOCK_SIZE,
	}
},
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 15,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_ecb_encrypt,
		.decrypt	= mtk_ecb_decrypt,
	}
},
};

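/* Completion tasklets for ring0 (encryption) and ring1 (decryption). */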
static void mtk_aes_enc_task(unsigned long data)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)data;
	struct mtk_aes_rec *aes = cryp->aes[0];

	mtk_aes_unmap(cryp, aes);
	mtk_aes_complete(cryp, aes);
}

static void mtk_aes_dec_task(unsigned long data)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)data;
	struct mtk_aes_rec *aes = cryp->aes[1];

	mtk_aes_unmap(cryp, aes);
	mtk_aes_complete(cryp, aes);
}

static irqreturn_t mtk_aes_enc_irq(int irq, void *dev_id)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
	struct mtk_aes_rec *aes = cryp->aes[0];
	u32 val = mtk_aes_read(cryp, RDR_STAT(RING0));

	mtk_aes_write(cryp, RDR_STAT(RING0), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(RING0), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(RING0),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

static irqreturn_t mtk_aes_dec_irq(int irq, void *dev_id)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
	struct mtk_aes_rec *aes = cryp->aes[1];
	u32 val = mtk_aes_read(cryp, RDR_STAT(RING1));

	mtk_aes_write(cryp, RDR_STAT(RING1), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(RING1), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(RING1),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

/*
 * The purpose of creating encryption and decryption records is to
 * process outbound/inbound data in parallel. This improves performance
 * in most use cases, such as IPsec VPN, especially under heavy network
 * traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->id = i;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);
	}

	tasklet_init(&aes[0]->task, mtk_aes_enc_task, (unsigned long)cryp);
	tasklet_init(&aes[1]->task, mtk_aes_dec_task, (unsigned long)cryp);

	return 0;

err_cleanup:
	kfree(aes[i]);	/* record that failed partway through setup, if any */
	for (; i--; ) {
		free_page((unsigned long)aes[i]->buf);
		kfree(aes[i]);
	}

	return err;
}

static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->task);
		free_page((unsigned long)cryp->aes[i]->buf);
		kfree(cryp->aes[i]);
	}
}

static void mtk_aes_unregister_algs(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	return 0;

err_aes_algs:
	for (; i--; )
		crypto_unregister_alg(&aes_algs[i]);

	return err;
}

int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	/* Ring0 is used by the encryption record */
	ret = devm_request_irq(cryp->dev, cryp->irq[RING0], mtk_aes_enc_irq,
			       IRQF_TRIGGER_LOW, "mtk-aes", cryp);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES encryption irq.\n");
		goto err_res;
	}

	/* Ring1 is used by the decryption record */
	ret = devm_request_irq(cryp->dev, cryp->irq[RING1], mtk_aes_dec_irq,
			       IRQF_TRIGGER_LOW, "mtk-aes", cryp);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES decryption irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupts */
	mtk_aes_write(cryp, AIC_ENABLE_SET(RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}

void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}