/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
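/*
 * Illustrative sketch only (not compiled): how the job descriptor shown
 * above is assembled with the desc_constr.h helpers used throughout this
 * file.  src_dma, dst_dma, in_len, out_len and the option flags stand in
 * for values a caller computes, as init_aead_job() below does:
 *
 *	u32 *desc = edesc->hw_desc;
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 */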

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16
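/*
 * Worked example, assuming the usual crypto header values
 * (AES_MAX_KEY_SIZE = 32, SHA512_DIGEST_SIZE = 64): CAAM_MAX_KEY_SIZE =
 * 32 + 64 * 2 = 160 bytes, i.e. room for the largest split key plus the
 * largest class 1 encryption key, stored back to back in ctx->key.
 */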

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
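/*
 * Sanity arithmetic, assuming CAAM_CMD_SZ is the 4-byte descriptor
 * command word: DESC_AEAD_GIVENC_LEN = (4 + 15 + 7) * 4 = 104 bytes, so
 * DESC_MAX_USED_LEN = (104 + 160) / 4 = 66 words -- the worst-case size
 * used below for the sh_desc_* buffers in struct caam_ctx.
 */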

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head alg_list;
Kim Phillips8e8ec592011-03-13 16:54:26 +080090
Yuan Kang1acebad32011-07-15 11:21:42 +080091/* Set DK bit in class 1 operation if shared */
92static inline void append_dec_op1(u32 *desc, u32 type)
93{
94 u32 *jump_cmd, *uncond_jump_cmd;
95
96 jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
97 append_operation(desc, type | OP_ALG_AS_INITFINAL |
98 OP_ALG_DECRYPT);
99 uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
100 set_jump_tgt_here(desc, jump_cmd);
101 append_operation(desc, type | OP_ALG_AS_INITFINAL |
102 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
103 set_jump_tgt_here(desc, uncond_jump_cmd);
104}
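/*
 * Control flow of the fragment emitted above (a sketch):
 *
 *	JUMP if SHRD -------------------------.
 *	OPERATION type | DECRYPT              |
 *	JUMP (always) ----------------------. |
 *	OPERATION type | DECRYPT | AAI_DK <-|-'
 *	<-----------------------------------'
 *
 * so the plain decrypt operation runs when the context is not shared,
 * and the DK (decrypt key) variant runs when it is.
 */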

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
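/*
 * Note on the two MATH ADD commands above: they copy SEQINLEN into both
 * variable sequence lengths (REG0 is assumed to hold zero here, as in
 * the givencrypt descriptor in aead_set_sh_desc() below), so the VLF
 * FIFO load/store pair moves exactly the input length in and back out.
 */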

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
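/*
 * Layout of ctx->key as assembled by aead_setkey() below (a sketch):
 *
 *	offset 0          split_key_len   split_key_pad_len
 *	| MDHA split key |     pad       | class 1 (enc) key |
 *
 * split_key_len bytes of split key, padded to a 16-byte multiple,
 * immediately followed by enckeylen bytes of encryption key.
 */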

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);
}
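/*
 * Shared descriptor prologue emitted above (a sketch):
 *
 *	SHR HDR (SERIAL)
 *	JUMP if SHRD ----------.
 *	KEY (MDHA split key)   |
 *	KEY (class 1 enc key)  |
 *	<----------------------'
 *
 * i.e. the key commands execute only when the descriptor context was
 * not inherited from a prior job sharing this descriptor.
 */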

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV from class 1 context to the output fifo */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
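	/*
	 * Example: for a SHA1-based MAC the pad size from mdpadlen[] is
	 * 20, so split_key_len = 20 * 2 = 40 and
	 * split_key_pad_len = ALIGN(40, 16) = 48.
	 */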

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret) {
		goto badkey;
	}

	/* append encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen - ctx->authsize, 1);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}

/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
			   out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
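/*
 * Layout assumed for the ablkcipher link table in the non-contiguous
 * case: [ iv entry | src entries... ], plus a separate run of dst
 * entries when req->src != req->dst -- which is why dst_dma above skips
 * exactly one sec4_sg_entry when src == dst.
 */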

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
		dst_nents = sg_count(req->dst,
				     req->cryptlen +
					(encrypt ? authsize : (-authsize)),
				     &dst_chained);
	} else {
		src_nents = sg_count(req->src,
				     req->cryptlen +
					(encrypt ? authsize : 0),
				     &src_chained);
	}

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg(req->assoc,
			      (assoc_nents ? : 1),
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents ? : 1;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src,
				   (src_nents ? : 1),
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	return edesc;
}
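/*
 * Link table layout built above when !all_contig (a sketch):
 *
 *	[ assoc entries | iv entry | src entries (LAST) | dst entries ]
 *
 * init_aead_job() points the seq-in pointer at the table base and the
 * seq-out pointer at the dst sub-table via sec4_sg_index.
 */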

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
Yuan Kang0e479302011-07-15 11:21:41 +08001266
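/*
 * Note: for decryption the ICV check is performed by the CAAM itself as
 * part of the shared descriptor (built elsewhere in this file), not by
 * software; an authentication failure comes back in the job status word
 * and is surfaced to the caller from aead_decrypt_done().
 */
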
/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
				     &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
		contig &= ~GIV_SRC_CONTIG;
	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;
	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		sec4_sg_len += 1;
	}
	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	*contig_ptr = contig;

	sec4_sg_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		sg_to_sec4_sg(req->assoc, assoc_nents,
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	return edesc;
}

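/*
 * Sketch of the contiguity test above: GIV_SRC_CONTIG survives only when
 * assoc, the generated IV and src each map to a single segment laid out
 * back to back in DMA space, i.e.
 *
 *	[ assoc ][ IV ][ src ]
 *
 * Any gap, or any multi-entry scatterlist, clears the flag and forces the
 * link-table path, with the IV injected as its own sec4_sg entry.
 */
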
static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);

	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

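/*
 * Caller-side sketch for givencrypt, assuming the legacy givcrypt API of
 * this kernel generation (the helpers live outside this file; allocation
 * and error handling are omitted, my_done/my_ctx/seqno are hypothetical).
 * The generated IV is written back into iv_buf:
 *
 *	struct aead_givcrypt_request *greq;
 *
 *	greq = aead_givcrypt_alloc(tfm, GFP_KERNEL);
 *	aead_givcrypt_set_callback(greq, 0, my_done, my_ctx);
 *	aead_givcrypt_set_crypt(greq, src_sg, dst_sg, cryptlen, iv_buf);
 *	aead_givcrypt_set_assoc(greq, assoc_sg, assoclen);
 *	aead_givcrypt_set_giv(greq, iv_buf, seqno);
 *	crypto_aead_givencrypt(greq);
 */
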
/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}

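/*
 * The iv_contig fast path above is aimed at callers that place the IV
 * directly in front of the payload (as an eseqiv-style geniv does): a
 * single-entry src whose DMA address equals iv_dma + ivsize can be handed
 * to the hardware as one flat buffer, with no link table at all.
 */
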
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

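/*
 * Caller-side sketch for the two functions above, assuming the legacy
 * ablkcipher API ("cbc(aes)" is one of the templates registered below;
 * my_done/my_ctx are hypothetical caller names, setup and error handling
 * omitted):
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, 0, my_done, my_ctx);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);	(async: -EINPROGRESS once queued)
 */
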
#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};

static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(md5),cbc(aes))",
		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(aes))",
		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(aes))",
		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des))",
		.driver_name = "authenc-hmac-md5-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des))",
		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des))",
		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	}
};

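/*
 * Each entry above becomes one crypto_alg instance.  A caller can bind to
 * the hardware implementation either through the generic name, letting the
 * driver's elevated cra_priority win the lookup, or explicitly through the
 * driver name; a minimal sketch:
 *
 *	tfm = crypto_alloc_aead("authenc-hmac-sha1-cbc-aes-caam", 0, 0);
 */
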
struct caam_crypto_alg {
	struct list_head entry;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}

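/*
 * Each transform grabs its own job ring at init time (caam_jr_alloc()
 * above), so requests from different tfms can be enqueued to the SEC
 * hardware independently; the ring is returned in caam_cra_exit() below.
 */
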
static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}

static int __init caam_algapi_init(void)
{
	int i = 0, err = 0;

	INIT_LIST_HEAD(&alg_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(&driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &alg_list);
	}
	if (!list_empty(&alg_list))
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

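/*
 * Registration note: a failure to allocate or register one template only
 * skips that algorithm (the loop above continues), so a partially capable
 * device still exposes whatever subset registered cleanly; the resulting
 * set is visible in /proc/crypto, as the pr_info() above says.
 */
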
module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");