/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 19 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
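/*
 * The DESC_*_LEN values above are byte counts expressed as multiples of
 * CAAM_CMD_SZ (one 32-bit descriptor command word).  They are compared
 * against CAAM_DESC_BYTES_MAX when descriptors are built to decide whether
 * keys can be inlined in the shared descriptor or must be referenced by
 * pointer (the keys_fit_inline logic below); DESC_MAX_USED_LEN converts
 * the worst case back to words to size the buffers in struct caam_ctx.
 */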

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);
}

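/*
 * Shared descriptors for "null encryption" authenc algorithms: the payload
 * is copied through unmodified while the class 2 (MDHA) engine computes
 * (encrypt) or verifies (decrypt) the ICV.
 */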
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/*
	 * NULL encryption; IV is zero
	 * assoclen = (assoclen + cryptlen) - cryptlen
	 */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

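/*
 * Construct the encrypt, decrypt and givencrypt shared descriptors for
 * authenc-style AEAD transforms.  Rebuilt whenever the key or authsize
 * changes, since both affect the descriptor contents.
 */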
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy IV from class 1 context to the output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

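/*
 * Construct the AES-GCM encrypt and decrypt shared descriptors.
 * Zero-length payload and zero-length associated data are handled by
 * conditional jumps within the descriptor itself.
 */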
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);
	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 7);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* if assoclen is ZERO, jump to reading IV - it is the only input data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* jump to ICV writing */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* read IV - it is the only input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
			     FIFOLD_TYPE_LAST1);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);

	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* jump to zero-payload command if cryptlen is zero */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	/* if assoclen is ZERO, skip reading assoc data */
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 4);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* if assoclen is ZERO, jump to ICV reading */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

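	/*
	 * The MDHA split key is derived from the authentication key by
	 * gen_split_key(); its length is twice the MDHA pad size of the
	 * selected hash and is padded to a 16-byte boundary before the
	 * encryption key is stored right after it in ctx->key.
	 */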
	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret) {
		goto badkey;
	}

	/* append the encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

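/*
 * Job-ring completion callbacks: report any CAAM status error, unmap the
 * request's DMA resources and hand the result back to the crypto API.
 */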
Yuan Kang0e479302011-07-15 11:21:41 +08001179static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001180 void *context)
1181{
Yuan Kang0e479302011-07-15 11:21:41 +08001182 struct aead_request *req = context;
1183 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001184#ifdef DEBUG
Yuan Kang0e479302011-07-15 11:21:41 +08001185 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001186 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Yuan Kang1acebad2011-07-15 11:21:42 +08001187 int ivsize = crypto_aead_ivsize(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001188
1189 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1190#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08001191
Yuan Kang0e479302011-07-15 11:21:41 +08001192 edesc = (struct aead_edesc *)((char *)desc -
1193 offsetof(struct aead_edesc, hw_desc));
Kim Phillips8e8ec592011-03-13 16:54:26 +08001194
Marek Vasutfa9659c2014-04-24 20:05:12 +02001195 if (err)
1196 caam_jr_strstatus(jrdev, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001197
Yuan Kang0e479302011-07-15 11:21:41 +08001198 aead_unmap(jrdev, edesc, req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001199
1200#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001201 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001202 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1203 req->assoclen , 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001204 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001205 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001206 edesc->src_nents ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001207 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001208 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1209 edesc->src_nents ? 100 : req->cryptlen +
Kim Phillips8e8ec592011-03-13 16:54:26 +08001210 ctx->authsize + 4, 1);
1211#endif
1212
1213 kfree(edesc);
1214
Yuan Kang0e479302011-07-15 11:21:41 +08001215 aead_request_complete(req, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001216}
1217
Yuan Kang0e479302011-07-15 11:21:41 +08001218static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001219 void *context)
1220{
Yuan Kang0e479302011-07-15 11:21:41 +08001221 struct aead_request *req = context;
1222 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001223#ifdef DEBUG
Yuan Kang0e479302011-07-15 11:21:41 +08001224 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001225 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Yuan Kang1acebad2011-07-15 11:21:42 +08001226 int ivsize = crypto_aead_ivsize(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001227
1228 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1229#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08001230
Yuan Kang0e479302011-07-15 11:21:41 +08001231 edesc = (struct aead_edesc *)((char *)desc -
1232 offsetof(struct aead_edesc, hw_desc));
Kim Phillips8e8ec592011-03-13 16:54:26 +08001233
Yuan Kang1acebad2011-07-15 11:21:42 +08001234#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001235 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001236 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1237 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001238 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001239 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
Horia Geantabbf9c892013-11-28 15:11:16 +02001240 req->cryptlen - ctx->authsize, 1);
Yuan Kang1acebad2011-07-15 11:21:42 +08001241#endif
1242
Marek Vasutfa9659c2014-04-24 20:05:12 +02001243 if (err)
1244 caam_jr_strstatus(jrdev, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001245
Yuan Kang0e479302011-07-15 11:21:41 +08001246 aead_unmap(jrdev, edesc, req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001247
1248 /*
1249 * verify hw auth check passed else return -EBADMSG
1250 */
1251 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
1252 err = -EBADMSG;
1253
1254#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001255 print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001256 DUMP_PREFIX_ADDRESS, 16, 4,
Yuan Kang0e479302011-07-15 11:21:41 +08001257 ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
1258 sizeof(struct iphdr) + req->assoclen +
1259 ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
Kim Phillips8e8ec592011-03-13 16:54:26 +08001260 ctx->authsize + 36, 1);
Yuan Kanga299c832012-06-22 19:48:46 -05001261 if (!err && edesc->sec4_sg_bytes) {
Yuan Kang0e479302011-07-15 11:21:41 +08001262 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
Alex Porosanu514df282013-08-14 18:56:45 +03001263 print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001264 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
1265 sg->length + ctx->authsize + 16, 1);
1266 }
1267#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08001268
Kim Phillips8e8ec592011-03-13 16:54:26 +08001269 kfree(edesc);
1270
Yuan Kang0e479302011-07-15 11:21:41 +08001271 aead_request_complete(req, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001272}
1273
Yuan Kangacdca312011-07-15 11:21:42 +08001274static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1275 void *context)
1276{
1277 struct ablkcipher_request *req = context;
1278 struct ablkcipher_edesc *edesc;
1279#ifdef DEBUG
1280 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1281 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1282
1283 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1284#endif
1285
1286 edesc = (struct ablkcipher_edesc *)((char *)desc -
1287 offsetof(struct ablkcipher_edesc, hw_desc));
1288
Marek Vasutfa9659c2014-04-24 20:05:12 +02001289 if (err)
1290 caam_jr_strstatus(jrdev, err);
Yuan Kangacdca312011-07-15 11:21:42 +08001291
1292#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001293 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001294 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1295 edesc->src_nents > 1 ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001296 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001297 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1298 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1299#endif
1300
1301 ablkcipher_unmap(jrdev, edesc, req);
1302 kfree(edesc);
1303
1304 ablkcipher_request_complete(req, err);
1305}
1306
1307static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1308 void *context)
1309{
1310 struct ablkcipher_request *req = context;
1311 struct ablkcipher_edesc *edesc;
1312#ifdef DEBUG
1313 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1314 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1315
1316 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1317#endif
1318
1319 edesc = (struct ablkcipher_edesc *)((char *)desc -
1320 offsetof(struct ablkcipher_edesc, hw_desc));
Marek Vasutfa9659c2014-04-24 20:05:12 +02001321 if (err)
1322 caam_jr_strstatus(jrdev, err);
Yuan Kangacdca312011-07-15 11:21:42 +08001323
1324#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001325 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001326 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1327 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001328 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001329 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1330 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1331#endif
1332
1333 ablkcipher_unmap(jrdev, edesc, req);
1334 kfree(edesc);
1335
1336 ablkcipher_request_complete(req, err);
1337}
1338
Kim Phillips8e8ec592011-03-13 16:54:26 +08001339/*
Yuan Kang1acebad2011-07-15 11:21:42 +08001340 * Fill in aead job descriptor
Kim Phillips8e8ec592011-03-13 16:54:26 +08001341 */
Yuan Kang1acebad2011-07-15 11:21:42 +08001342static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
1343 struct aead_edesc *edesc,
1344 struct aead_request *req,
1345 bool all_contig, bool encrypt)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001346{
Yuan Kang0e479302011-07-15 11:21:41 +08001347 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001348 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001349 int ivsize = crypto_aead_ivsize(aead);
1350 int authsize = ctx->authsize;
Yuan Kang1acebad2011-07-15 11:21:42 +08001351 u32 *desc = edesc->hw_desc;
1352 u32 out_options = 0, in_options;
1353 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001354 int len, sec4_sg_index = 0;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03001355 bool is_gcm = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001356
Yuan Kang1acebad2011-07-15 11:21:42 +08001357#ifdef DEBUG
Kim Phillips8e8ec592011-03-13 16:54:26 +08001358 debug("assoclen %d cryptlen %d authsize %d\n",
Yuan Kang0e479302011-07-15 11:21:41 +08001359 req->assoclen, req->cryptlen, authsize);
Alex Porosanu514df282013-08-14 18:56:45 +03001360 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001361 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1362 req->assoclen , 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001363 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001364 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001365 edesc->src_nents ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001366 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001367 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
Yuan Kang1acebad2011-07-15 11:21:42 +08001368 edesc->src_nents ? 100 : req->cryptlen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001369 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001370 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1371 desc_bytes(sh_desc), 1);
1372#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08001373
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03001374 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
1375 OP_ALG_ALGSEL_AES) &&
1376 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
1377 is_gcm = true;
1378
Yuan Kang1acebad2011-07-15 11:21:42 +08001379 len = desc_len(sh_desc);
1380 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1381
1382 if (all_contig) {
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03001383 if (is_gcm)
1384 src_dma = edesc->iv_dma;
1385 else
1386 src_dma = sg_dma_address(req->assoc);
Yuan Kang1acebad2011-07-15 11:21:42 +08001387 in_options = 0;
1388 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001389 src_dma = edesc->sec4_sg_dma;
1390 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
1391 (edesc->src_nents ? : 1);
Yuan Kang1acebad2011-07-15 11:21:42 +08001392 in_options = LDST_SGF;
1393 }
Horia Geantabbf9c892013-11-28 15:11:16 +02001394
1395 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
1396 in_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001397
Yuan Kang1acebad2011-07-15 11:21:42 +08001398 if (likely(req->src == req->dst)) {
1399 if (all_contig) {
1400 dst_dma = sg_dma_address(req->src);
1401 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001402 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
Yuan Kang1acebad2011-07-15 11:21:42 +08001403 ((edesc->assoc_nents ? : 1) + 1);
1404 out_options = LDST_SGF;
1405 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001406 } else {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001407 if (!edesc->dst_nents) {
Yuan Kang0e479302011-07-15 11:21:41 +08001408 dst_dma = sg_dma_address(req->dst);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001409 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001410 dst_dma = edesc->sec4_sg_dma +
1411 sec4_sg_index *
1412 sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08001413 out_options = LDST_SGF;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001414 }
1415 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001416 if (encrypt)
Horia Geantabbf9c892013-11-28 15:11:16 +02001417 append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
1418 out_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001419 else
Yuan Kang1acebad2011-07-15 11:21:42 +08001420 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
1421 out_options);
1422}
1423
1424/*
1425 * Fill in aead givencrypt job descriptor
1426 */
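/*
 * Same layout as init_aead_job(), adjusted for IV generation: the input
 * side still covers assoc + IV + cryptlen, but the output sequence starts
 * at the IV buffer (or its S/G entry), so the freshly generated IV is
 * emitted in front of the ciphertext and ICV, i.e. SEQ OUT spans
 * ivsize + cryptlen + authsize.
 */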
1427static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1428 struct aead_edesc *edesc,
1429 struct aead_request *req,
1430 int contig)
1431{
1432 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1433 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1434 int ivsize = crypto_aead_ivsize(aead);
1435 int authsize = ctx->authsize;
1436 u32 *desc = edesc->hw_desc;
1437 u32 out_options = 0, in_options;
1438 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001439 int len, sec4_sg_index = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001440
1441#ifdef DEBUG
Yuan Kang1acebad2011-07-15 11:21:42 +08001442 debug("assoclen %d cryptlen %d authsize %d\n",
1443 req->assoclen, req->cryptlen, authsize);
Alex Porosanu514df282013-08-14 18:56:45 +03001444 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001445 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1446		       req->assoclen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001447 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001448 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001449 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001450 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1451 edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001452 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001453 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1454 desc_bytes(sh_desc), 1);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001455#endif
1456
Yuan Kang1acebad2011-07-15 11:21:42 +08001457 len = desc_len(sh_desc);
1458 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1459
1460 if (contig & GIV_SRC_CONTIG) {
1461 src_dma = sg_dma_address(req->assoc);
1462 in_options = 0;
1463 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001464 src_dma = edesc->sec4_sg_dma;
1465 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001466 in_options = LDST_SGF;
1467 }
Horia Geantabbf9c892013-11-28 15:11:16 +02001468 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
1469 in_options);
Yuan Kang1acebad2011-07-15 11:21:42 +08001470
1471 if (contig & GIV_DST_CONTIG) {
1472 dst_dma = edesc->iv_dma;
1473 } else {
1474 if (likely(req->src == req->dst)) {
Yuan Kanga299c832012-06-22 19:48:46 -05001475 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
Yuan Kang1acebad2011-07-15 11:21:42 +08001476 edesc->assoc_nents;
1477 out_options = LDST_SGF;
1478 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001479 dst_dma = edesc->sec4_sg_dma +
1480 sec4_sg_index *
1481 sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08001482 out_options = LDST_SGF;
1483 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001484 }
1485
Horia Geantabbf9c892013-11-28 15:11:16 +02001486 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
1487 out_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001488}
1489
1490/*
Yuan Kangacdca312011-07-15 11:21:42 +08001491 * Fill in ablkcipher job descriptor
1492 */
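/*
 * The ablkcipher job is simpler: SEQ IN covers IV + nbytes (either
 * contiguously starting at iv_dma or through the sec4_sg table), while
 * SEQ OUT covers just the nbytes written back to req->dst (or in place
 * to req->src).
 */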
1493static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1494 struct ablkcipher_edesc *edesc,
1495 struct ablkcipher_request *req,
1496 bool iv_contig)
1497{
1498 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1499 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1500 u32 *desc = edesc->hw_desc;
1501 u32 out_options = 0, in_options;
1502 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001503 int len, sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08001504
1505#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001506 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001507 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1508 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001509 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001510 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1511 edesc->src_nents ? 100 : req->nbytes, 1);
1512#endif
1513
1514 len = desc_len(sh_desc);
1515 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1516
1517 if (iv_contig) {
1518 src_dma = edesc->iv_dma;
1519 in_options = 0;
1520 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001521 src_dma = edesc->sec4_sg_dma;
1522 sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
Yuan Kangacdca312011-07-15 11:21:42 +08001523 in_options = LDST_SGF;
1524 }
1525 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
1526
1527 if (likely(req->src == req->dst)) {
1528 if (!edesc->src_nents && iv_contig) {
1529 dst_dma = sg_dma_address(req->src);
1530 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001531 dst_dma = edesc->sec4_sg_dma +
1532 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08001533 out_options = LDST_SGF;
1534 }
1535 } else {
1536 if (!edesc->dst_nents) {
1537 dst_dma = sg_dma_address(req->dst);
1538 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05001539 dst_dma = edesc->sec4_sg_dma +
1540 sec4_sg_index * sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08001541 out_options = LDST_SGF;
1542 }
1543 }
1544 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
1545}
1546
1547/*
Yuan Kang1acebad2011-07-15 11:21:42 +08001548 * allocate and map the aead extended descriptor
Kim Phillips8e8ec592011-03-13 16:54:26 +08001549 */
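/*
 * A single GFP_DMA allocation holds the whole extended descriptor:
 * struct aead_edesc, followed by the hw job descriptor area (desc_bytes),
 * followed by the sec4_sg link table; edesc->sec4_sg points into that
 * tail and is DMA-mapped separately once the table has been filled in.
 */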
Yuan Kang0e479302011-07-15 11:21:41 +08001550static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
Horia Geantabbf9c892013-11-28 15:11:16 +02001551 int desc_bytes, bool *all_contig_ptr,
1552 bool encrypt)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001553{
Yuan Kang0e479302011-07-15 11:21:41 +08001554 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001555 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1556 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08001557 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1558 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1559 int assoc_nents, src_nents, dst_nents = 0;
Yuan Kang0e479302011-07-15 11:21:41 +08001560 struct aead_edesc *edesc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001561 dma_addr_t iv_dma = 0;
1562 int sgc;
1563 bool all_contig = true;
Yuan Kang643b39b2012-06-22 19:48:49 -05001564 bool assoc_chained = false, src_chained = false, dst_chained = false;
Yuan Kang1acebad2011-07-15 11:21:42 +08001565 int ivsize = crypto_aead_ivsize(aead);
Yuan Kanga299c832012-06-22 19:48:46 -05001566 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
Horia Geantabbf9c892013-11-28 15:11:16 +02001567 unsigned int authsize = ctx->authsize;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03001568 bool is_gcm = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001569
Yuan Kang643b39b2012-06-22 19:48:49 -05001570 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001571
Horia Geantabbf9c892013-11-28 15:11:16 +02001572 if (unlikely(req->dst != req->src)) {
1573 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1574 dst_nents = sg_count(req->dst,
1575 req->cryptlen +
1576 (encrypt ? authsize : (-authsize)),
1577 &dst_chained);
1578 } else {
1579 src_nents = sg_count(req->src,
1580 req->cryptlen +
1581 (encrypt ? authsize : 0),
1582 &src_chained);
1583 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001584
Yuan Kang643b39b2012-06-22 19:48:49 -05001585 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
Horia Geanta286233e2013-05-10 15:08:39 +03001586 DMA_TO_DEVICE, assoc_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001587 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05001588 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1589 DMA_BIDIRECTIONAL, src_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001590 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05001591 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1592 DMA_TO_DEVICE, src_chained);
1593 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1594 DMA_FROM_DEVICE, dst_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001595 }
1596
Yuan Kang1acebad2011-07-15 11:21:42 +08001597 iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03001598 if (dma_mapping_error(jrdev, iv_dma)) {
1599 dev_err(jrdev, "unable to map IV\n");
1600 return ERR_PTR(-ENOMEM);
1601 }
1602
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03001603 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
1604 OP_ALG_ALGSEL_AES) &&
1605 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
1606 is_gcm = true;
1607
1608 /*
1609 * Check if data are contiguous.
1610 * GCM expected input sequence: IV, AAD, text
1611	 * All others - expected input sequence: AAD, IV, text
1612 */
1613 if (is_gcm)
1614 all_contig = (!assoc_nents &&
1615 iv_dma + ivsize == sg_dma_address(req->assoc) &&
1616 !src_nents && sg_dma_address(req->assoc) +
1617 req->assoclen == sg_dma_address(req->src));
1618 else
1619 all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
1620 req->assoclen == iv_dma && !src_nents &&
1621 iv_dma + ivsize == sg_dma_address(req->src));
1622 if (!all_contig) {
Yuan Kang1acebad2011-07-15 11:21:42 +08001623 assoc_nents = assoc_nents ? : 1;
1624 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05001625 sec4_sg_len = assoc_nents + 1 + src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001626 }
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03001627
Yuan Kanga299c832012-06-22 19:48:46 -05001628 sec4_sg_len += dst_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001629
Yuan Kanga299c832012-06-22 19:48:46 -05001630 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001631
1632 /* allocate space for base edesc and hw desc commands, link tables */
Yuan Kang0e479302011-07-15 11:21:41 +08001633 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05001634 sec4_sg_bytes, GFP_DMA | flags);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001635 if (!edesc) {
1636 dev_err(jrdev, "could not allocate extended descriptor\n");
1637 return ERR_PTR(-ENOMEM);
1638 }
1639
1640 edesc->assoc_nents = assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001641 edesc->assoc_chained = assoc_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001642 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001643 edesc->src_chained = src_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001644 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001645 edesc->dst_chained = dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08001646 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001647 edesc->sec4_sg_bytes = sec4_sg_bytes;
1648 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1649 desc_bytes;
Yuan Kang1acebad2011-07-15 11:21:42 +08001650 *all_contig_ptr = all_contig;
1651
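	/*
	 * Link table layout when the request is not contiguous:
	 * non-GCM: [assoc entries][IV entry][src entries],
	 * GCM:     [IV entry][assoc entries][src entries],
	 * followed by the dst entries when req->dst differs from req->src.
	 */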
Yuan Kanga299c832012-06-22 19:48:46 -05001652 sec4_sg_index = 0;
Yuan Kang1acebad2011-07-15 11:21:42 +08001653 if (!all_contig) {
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03001654 if (!is_gcm) {
1655 sg_to_sec4_sg(req->assoc,
1656 (assoc_nents ? : 1),
1657 edesc->sec4_sg +
1658 sec4_sg_index, 0);
1659 sec4_sg_index += assoc_nents ? : 1;
1660 }
1661
Yuan Kanga299c832012-06-22 19:48:46 -05001662 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08001663 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05001664 sec4_sg_index += 1;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03001665
1666 if (is_gcm) {
1667 sg_to_sec4_sg(req->assoc,
1668 (assoc_nents ? : 1),
1669 edesc->sec4_sg +
1670 sec4_sg_index, 0);
1671 sec4_sg_index += assoc_nents ? : 1;
1672 }
1673
Yuan Kanga299c832012-06-22 19:48:46 -05001674 sg_to_sec4_sg_last(req->src,
1675 (src_nents ? : 1),
1676 edesc->sec4_sg +
1677 sec4_sg_index, 0);
1678 sec4_sg_index += src_nents ? : 1;
Yuan Kang1acebad2011-07-15 11:21:42 +08001679 }
1680 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05001681 sg_to_sec4_sg_last(req->dst, dst_nents,
1682 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad2011-07-15 11:21:42 +08001683 }
Ruchika Gupta1da2be32014-06-23 19:50:26 +05301684 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1685 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03001686 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1687 dev_err(jrdev, "unable to map S/G table\n");
1688 return ERR_PTR(-ENOMEM);
1689 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001690
1691 return edesc;
1692}
1693
Yuan Kang0e479302011-07-15 11:21:41 +08001694static int aead_encrypt(struct aead_request *req)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001695{
Yuan Kang0e479302011-07-15 11:21:41 +08001696 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001697 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001698 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1699 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08001700 bool all_contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001701 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001702 int ret = 0;
1703
Kim Phillips8e8ec592011-03-13 16:54:26 +08001704 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08001705 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
Horia Geantabbf9c892013-11-28 15:11:16 +02001706 CAAM_CMD_SZ, &all_contig, true);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001707 if (IS_ERR(edesc))
1708 return PTR_ERR(edesc);
1709
Yuan Kang1acebad2011-07-15 11:21:42 +08001710 /* Create and submit job descriptor */
1711 init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
1712 all_contig, true);
1713#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001714 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001715 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1716 desc_bytes(edesc->hw_desc), 1);
1717#endif
1718
Kim Phillips8e8ec592011-03-13 16:54:26 +08001719 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001720 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1721 if (!ret) {
1722 ret = -EINPROGRESS;
1723 } else {
1724 aead_unmap(jrdev, edesc, req);
1725 kfree(edesc);
1726 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001727
Yuan Kang1acebad2011-07-15 11:21:42 +08001728 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001729}
1730
Yuan Kang0e479302011-07-15 11:21:41 +08001731static int aead_decrypt(struct aead_request *req)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001732{
Yuan Kang1acebad2011-07-15 11:21:42 +08001733 struct aead_edesc *edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08001734 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Yuan Kang0e479302011-07-15 11:21:41 +08001735 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1736 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08001737 bool all_contig;
Yuan Kang0e479302011-07-15 11:21:41 +08001738 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001739 int ret = 0;
Yuan Kang0e479302011-07-15 11:21:41 +08001740
1741 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08001742 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
Horia Geantabbf9c892013-11-28 15:11:16 +02001743 CAAM_CMD_SZ, &all_contig, false);
Yuan Kang0e479302011-07-15 11:21:41 +08001744 if (IS_ERR(edesc))
1745 return PTR_ERR(edesc);
1746
Yuan Kang1acebad2011-07-15 11:21:42 +08001747#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001748 print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001749 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1750 req->cryptlen, 1);
1751#endif
1752
1753	/* Create and submit job descriptor */
1754 init_aead_job(ctx->sh_desc_dec,
1755 ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
1756#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001757 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001758 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1759 desc_bytes(edesc->hw_desc), 1);
1760#endif
1761
Yuan Kang0e479302011-07-15 11:21:41 +08001762 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001763 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1764 if (!ret) {
1765 ret = -EINPROGRESS;
1766 } else {
1767 aead_unmap(jrdev, edesc, req);
1768 kfree(edesc);
1769 }
Yuan Kang0e479302011-07-15 11:21:41 +08001770
Yuan Kang1acebad2011-07-15 11:21:42 +08001771 return ret;
1772}
Yuan Kang0e479302011-07-15 11:21:41 +08001773
Yuan Kang1acebad2011-07-15 11:21:42 +08001774/*
1775 * allocate and map the aead extended descriptor for aead givencrypt
1776 */
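/*
 * Mirrors aead_edesc_alloc(), except that the IV to be generated
 * (greq->giv) is mapped instead of req->iv and the destination side is
 * checked as well: when req->dst is separate and not contiguous with the
 * IV, an extra IV entry plus the dst entries are appended to the link
 * table so the generated IV still lands directly in front of the
 * ciphertext.
 */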
1777static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1778 *greq, int desc_bytes,
1779 u32 *contig_ptr)
1780{
1781 struct aead_request *req = &greq->areq;
1782 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1783 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1784 struct device *jrdev = ctx->jrdev;
1785 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1786 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1787 int assoc_nents, src_nents, dst_nents = 0;
1788 struct aead_edesc *edesc;
1789 dma_addr_t iv_dma = 0;
1790 int sgc;
1791 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1792 int ivsize = crypto_aead_ivsize(aead);
Yuan Kang643b39b2012-06-22 19:48:49 -05001793 bool assoc_chained = false, src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05001794 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
Yuan Kang0e479302011-07-15 11:21:41 +08001795
Yuan Kang643b39b2012-06-22 19:48:49 -05001796 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1797 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
Yuan Kang0e479302011-07-15 11:21:41 +08001798
Yuan Kang1acebad2011-07-15 11:21:42 +08001799 if (unlikely(req->dst != req->src))
Horia Geantabbf9c892013-11-28 15:11:16 +02001800 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
1801 &dst_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001802
Yuan Kang643b39b2012-06-22 19:48:49 -05001803 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
Horia Geanta286233e2013-05-10 15:08:39 +03001804 DMA_TO_DEVICE, assoc_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001805 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05001806 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1807 DMA_BIDIRECTIONAL, src_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001808 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05001809 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1810 DMA_TO_DEVICE, src_chained);
1811 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1812 DMA_FROM_DEVICE, dst_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001813 }
1814
Yuan Kang1acebad2011-07-15 11:21:42 +08001815 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03001816 if (dma_mapping_error(jrdev, iv_dma)) {
1817 dev_err(jrdev, "unable to map IV\n");
1818 return ERR_PTR(-ENOMEM);
1819 }
1820
1821 /* Check if data are contiguous */
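	/*
	 * Source side is contiguous only as assoc | IV | payload in one run
	 * of DMA addresses; destination side only when the IV buffer is
	 * immediately followed by req->dst.
	 */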
Yuan Kang1acebad2011-07-15 11:21:42 +08001822 if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1823 iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1824 contig &= ~GIV_SRC_CONTIG;
1825 if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1826 contig &= ~GIV_DST_CONTIG;
Kim Phillips2af8f4a2012-09-07 04:17:03 +08001827 if (unlikely(req->src != req->dst)) {
1828 dst_nents = dst_nents ? : 1;
1829 sec4_sg_len += 1;
1830 }
Yuan Kang1acebad2011-07-15 11:21:42 +08001831 if (!(contig & GIV_SRC_CONTIG)) {
1832 assoc_nents = assoc_nents ? : 1;
1833 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05001834 sec4_sg_len += assoc_nents + 1 + src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001835 if (likely(req->src == req->dst))
1836 contig &= ~GIV_DST_CONTIG;
1837 }
Yuan Kanga299c832012-06-22 19:48:46 -05001838 sec4_sg_len += dst_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001839
Yuan Kanga299c832012-06-22 19:48:46 -05001840 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08001841
1842 /* allocate space for base edesc and hw desc commands, link tables */
1843 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05001844 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kang1acebad2011-07-15 11:21:42 +08001845 if (!edesc) {
1846 dev_err(jrdev, "could not allocate extended descriptor\n");
1847 return ERR_PTR(-ENOMEM);
1848 }
1849
1850 edesc->assoc_nents = assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001851 edesc->assoc_chained = assoc_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08001852 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001853 edesc->src_chained = src_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08001854 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001855 edesc->dst_chained = dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08001856 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001857 edesc->sec4_sg_bytes = sec4_sg_bytes;
1858 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1859 desc_bytes;
Yuan Kang1acebad2011-07-15 11:21:42 +08001860 *contig_ptr = contig;
1861
Yuan Kanga299c832012-06-22 19:48:46 -05001862 sec4_sg_index = 0;
Yuan Kang1acebad2011-07-15 11:21:42 +08001863 if (!(contig & GIV_SRC_CONTIG)) {
Yuan Kanga299c832012-06-22 19:48:46 -05001864 sg_to_sec4_sg(req->assoc, assoc_nents,
1865 edesc->sec4_sg +
1866 sec4_sg_index, 0);
1867 sec4_sg_index += assoc_nents;
1868 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08001869 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05001870 sec4_sg_index += 1;
1871 sg_to_sec4_sg_last(req->src, src_nents,
1872 edesc->sec4_sg +
1873 sec4_sg_index, 0);
1874 sec4_sg_index += src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08001875 }
1876 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
Yuan Kanga299c832012-06-22 19:48:46 -05001877 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08001878 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05001879 sec4_sg_index += 1;
1880 sg_to_sec4_sg_last(req->dst, dst_nents,
1881 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad2011-07-15 11:21:42 +08001882 }
Ruchika Gupta1da2be32014-06-23 19:50:26 +05301883 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1884 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03001885 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1886 dev_err(jrdev, "unable to map S/G table\n");
1887 return ERR_PTR(-ENOMEM);
1888 }
Yuan Kang1acebad2011-07-15 11:21:42 +08001889
1890 return edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08001891}
1892
1893static int aead_givencrypt(struct aead_givcrypt_request *areq)
1894{
1895 struct aead_request *req = &areq->areq;
1896 struct aead_edesc *edesc;
1897 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001898 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1899 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08001900 u32 contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001901 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001902 int ret = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001903
Kim Phillips8e8ec592011-03-13 16:54:26 +08001904 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08001905 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1906 CAAM_CMD_SZ, &contig);
1907
Kim Phillips8e8ec592011-03-13 16:54:26 +08001908 if (IS_ERR(edesc))
1909 return PTR_ERR(edesc);
1910
Yuan Kang1acebad2011-07-15 11:21:42 +08001911#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001912 print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001913 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1914 req->cryptlen, 1);
1915#endif
1916
1917	/* Create and submit job descriptor */
1918 init_aead_giv_job(ctx->sh_desc_givenc,
1919 ctx->sh_desc_givenc_dma, edesc, req, contig);
1920#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001921 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08001922 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1923 desc_bytes(edesc->hw_desc), 1);
1924#endif
1925
Kim Phillips8e8ec592011-03-13 16:54:26 +08001926 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08001927 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1928 if (!ret) {
1929 ret = -EINPROGRESS;
1930 } else {
1931 aead_unmap(jrdev, edesc, req);
1932 kfree(edesc);
1933 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001934
Yuan Kang1acebad2011-07-15 11:21:42 +08001935 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001936}
1937
Horia Geantaae4a8252014-03-14 17:46:52 +02001938static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
1939{
1940 return aead_encrypt(&areq->areq);
1941}
1942
Yuan Kangacdca312011-07-15 11:21:42 +08001943/*
1944 * allocate and map the ablkcipher extended descriptor for ablkcipher
1945 */
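/*
 * Mapping direction follows the data flow: a single DMA_BIDIRECTIONAL
 * mapping when the request is processed in place (req->src == req->dst),
 * otherwise src is mapped DMA_TO_DEVICE and dst DMA_FROM_DEVICE; the IV
 * is always mapped DMA_TO_DEVICE.
 */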
1946static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1947 *req, int desc_bytes,
1948 bool *iv_contig_out)
1949{
1950 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1951 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1952 struct device *jrdev = ctx->jrdev;
1953 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1954 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1955 GFP_KERNEL : GFP_ATOMIC;
Yuan Kanga299c832012-06-22 19:48:46 -05001956 int src_nents, dst_nents = 0, sec4_sg_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08001957 struct ablkcipher_edesc *edesc;
1958 dma_addr_t iv_dma = 0;
1959 bool iv_contig = false;
1960 int sgc;
1961 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
Yuan Kang643b39b2012-06-22 19:48:49 -05001962 bool src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05001963 int sec4_sg_index;
Yuan Kangacdca312011-07-15 11:21:42 +08001964
Yuan Kang643b39b2012-06-22 19:48:49 -05001965 src_nents = sg_count(req->src, req->nbytes, &src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08001966
Yuan Kang643b39b2012-06-22 19:48:49 -05001967 if (req->dst != req->src)
1968 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08001969
1970 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05001971 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1972 DMA_BIDIRECTIONAL, src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08001973 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05001974 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1975 DMA_TO_DEVICE, src_chained);
1976 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1977 DMA_FROM_DEVICE, dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08001978 }
1979
Horia Geantace572082014-07-11 15:34:49 +03001980 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1981 if (dma_mapping_error(jrdev, iv_dma)) {
1982 dev_err(jrdev, "unable to map IV\n");
1983 return ERR_PTR(-ENOMEM);
1984 }
1985
Yuan Kangacdca312011-07-15 11:21:42 +08001986 /*
1987 * Check if iv can be contiguous with source and destination.
1988 * If so, include it. If not, create scatterlist.
1989 */
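	/*
	 * "Contiguous" means req->src is a single segment starting right
	 * after the mapped IV.  When it is not, the link table starts with
	 * [IV entry][src entries]; dst entries, if any, are appended in
	 * either case.
	 */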
Yuan Kangacdca312011-07-15 11:21:42 +08001990 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1991 iv_contig = true;
1992 else
1993 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05001994 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1995 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08001996
1997 /* allocate space for base edesc and hw desc commands, link tables */
1998 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05001999 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kangacdca312011-07-15 11:21:42 +08002000 if (!edesc) {
2001 dev_err(jrdev, "could not allocate extended descriptor\n");
2002 return ERR_PTR(-ENOMEM);
2003 }
2004
2005 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002006 edesc->src_chained = src_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08002007 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002008 edesc->dst_chained = dst_chained;
Yuan Kanga299c832012-06-22 19:48:46 -05002009 edesc->sec4_sg_bytes = sec4_sg_bytes;
2010 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2011 desc_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08002012
Yuan Kanga299c832012-06-22 19:48:46 -05002013 sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08002014 if (!iv_contig) {
Yuan Kanga299c832012-06-22 19:48:46 -05002015 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
2016 sg_to_sec4_sg_last(req->src, src_nents,
2017 edesc->sec4_sg + 1, 0);
2018 sec4_sg_index += 1 + src_nents;
Yuan Kangacdca312011-07-15 11:21:42 +08002019 }
2020
Yuan Kang643b39b2012-06-22 19:48:49 -05002021 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05002022 sg_to_sec4_sg_last(req->dst, dst_nents,
2023 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kangacdca312011-07-15 11:21:42 +08002024 }
2025
Yuan Kanga299c832012-06-22 19:48:46 -05002026 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2027 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002028 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2029 dev_err(jrdev, "unable to map S/G table\n");
2030 return ERR_PTR(-ENOMEM);
2031 }
2032
Yuan Kangacdca312011-07-15 11:21:42 +08002033 edesc->iv_dma = iv_dma;
2034
2035#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002036 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
Yuan Kanga299c832012-06-22 19:48:46 -05002037 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2038 sec4_sg_bytes, 1);
Yuan Kangacdca312011-07-15 11:21:42 +08002039#endif
2040
2041 *iv_contig_out = iv_contig;
2042 return edesc;
2043}
2044
2045static int ablkcipher_encrypt(struct ablkcipher_request *req)
2046{
2047 struct ablkcipher_edesc *edesc;
2048 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2049 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2050 struct device *jrdev = ctx->jrdev;
2051 bool iv_contig;
2052 u32 *desc;
2053 int ret = 0;
2054
2055 /* allocate extended descriptor */
2056 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2057 CAAM_CMD_SZ, &iv_contig);
2058 if (IS_ERR(edesc))
2059 return PTR_ERR(edesc);
2060
2061	/* Create and submit job descriptor */
2062 init_ablkcipher_job(ctx->sh_desc_enc,
2063 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
2064#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002065 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002066 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2067 desc_bytes(edesc->hw_desc), 1);
2068#endif
2069 desc = edesc->hw_desc;
2070 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2071
2072 if (!ret) {
2073 ret = -EINPROGRESS;
2074 } else {
2075 ablkcipher_unmap(jrdev, edesc, req);
2076 kfree(edesc);
2077 }
2078
2079 return ret;
2080}
2081
2082static int ablkcipher_decrypt(struct ablkcipher_request *req)
2083{
2084 struct ablkcipher_edesc *edesc;
2085 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2086 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2087 struct device *jrdev = ctx->jrdev;
2088 bool iv_contig;
2089 u32 *desc;
2090 int ret = 0;
2091
2092 /* allocate extended descriptor */
2093 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2094 CAAM_CMD_SZ, &iv_contig);
2095 if (IS_ERR(edesc))
2096 return PTR_ERR(edesc);
2097
2098	/* Create and submit job descriptor */
2099 init_ablkcipher_job(ctx->sh_desc_dec,
2100 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
2101 desc = edesc->hw_desc;
2102#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002103 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002104 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2105 desc_bytes(edesc->hw_desc), 1);
2106#endif
2107
2108 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
2109 if (!ret) {
2110 ret = -EINPROGRESS;
2111 } else {
2112 ablkcipher_unmap(jrdev, edesc, req);
2113 kfree(edesc);
2114 }
2115
2116 return ret;
2117}
2118
Yuan Kang885e9e22011-07-15 11:21:41 +08002119#define template_aead template_u.aead
Yuan Kangacdca312011-07-15 11:21:42 +08002120#define template_ablkcipher template_u.ablkcipher
Kim Phillips8e8ec592011-03-13 16:54:26 +08002121struct caam_alg_template {
2122 char name[CRYPTO_MAX_ALG_NAME];
2123 char driver_name[CRYPTO_MAX_ALG_NAME];
2124 unsigned int blocksize;
Yuan Kang885e9e22011-07-15 11:21:41 +08002125 u32 type;
2126 union {
2127 struct ablkcipher_alg ablkcipher;
2128 struct aead_alg aead;
2129 struct blkcipher_alg blkcipher;
2130 struct cipher_alg cipher;
2131 struct compress_alg compress;
2132 struct rng_alg rng;
2133 } template_u;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002134 u32 class1_alg_type;
2135 u32 class2_alg_type;
2136 u32 alg_op;
2137};
2138
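/*
 * Each template below supplies the crypto API plumbing (template_u) plus
 * the CAAM OPERATION settings: class1_alg_type selects the cipher CHA
 * algorithm and mode, class2_alg_type the hash used for the HMAC, and
 * alg_op the plain (non-PRECOMP) HMAC operation used when the split
 * authentication key is derived on the setkey path.
 */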
2139static struct caam_alg_template driver_algs[] = {
Horia Geanta246bbed2013-03-20 16:31:58 +02002140 /* single-pass ipsec_esp descriptor */
Kim Phillips8e8ec592011-03-13 16:54:26 +08002141 {
Horia Geantaae4a8252014-03-14 17:46:52 +02002142 .name = "authenc(hmac(md5),ecb(cipher_null))",
2143 .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
2144 .blocksize = NULL_BLOCK_SIZE,
2145 .type = CRYPTO_ALG_TYPE_AEAD,
2146 .template_aead = {
2147 .setkey = aead_setkey,
2148 .setauthsize = aead_setauthsize,
2149 .encrypt = aead_encrypt,
2150 .decrypt = aead_decrypt,
2151 .givencrypt = aead_null_givencrypt,
2152 .geniv = "<built-in>",
2153 .ivsize = NULL_IV_SIZE,
2154 .maxauthsize = MD5_DIGEST_SIZE,
2155 },
2156 .class1_alg_type = 0,
2157 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2158 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2159 },
2160 {
2161 .name = "authenc(hmac(sha1),ecb(cipher_null))",
2162 .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
2163 .blocksize = NULL_BLOCK_SIZE,
2164 .type = CRYPTO_ALG_TYPE_AEAD,
2165 .template_aead = {
2166 .setkey = aead_setkey,
2167 .setauthsize = aead_setauthsize,
2168 .encrypt = aead_encrypt,
2169 .decrypt = aead_decrypt,
2170 .givencrypt = aead_null_givencrypt,
2171 .geniv = "<built-in>",
2172 .ivsize = NULL_IV_SIZE,
2173 .maxauthsize = SHA1_DIGEST_SIZE,
2174 },
2175 .class1_alg_type = 0,
2176 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2177 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2178 },
2179 {
2180 .name = "authenc(hmac(sha224),ecb(cipher_null))",
2181 .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
2182 .blocksize = NULL_BLOCK_SIZE,
2183 .type = CRYPTO_ALG_TYPE_AEAD,
2184 .template_aead = {
2185 .setkey = aead_setkey,
2186 .setauthsize = aead_setauthsize,
2187 .encrypt = aead_encrypt,
2188 .decrypt = aead_decrypt,
2189 .givencrypt = aead_null_givencrypt,
2190 .geniv = "<built-in>",
2191 .ivsize = NULL_IV_SIZE,
2192 .maxauthsize = SHA224_DIGEST_SIZE,
2193 },
2194 .class1_alg_type = 0,
2195 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2196 OP_ALG_AAI_HMAC_PRECOMP,
2197 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2198 },
2199 {
2200 .name = "authenc(hmac(sha256),ecb(cipher_null))",
2201 .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
2202 .blocksize = NULL_BLOCK_SIZE,
2203 .type = CRYPTO_ALG_TYPE_AEAD,
2204 .template_aead = {
2205 .setkey = aead_setkey,
2206 .setauthsize = aead_setauthsize,
2207 .encrypt = aead_encrypt,
2208 .decrypt = aead_decrypt,
2209 .givencrypt = aead_null_givencrypt,
2210 .geniv = "<built-in>",
2211 .ivsize = NULL_IV_SIZE,
2212 .maxauthsize = SHA256_DIGEST_SIZE,
2213 },
2214 .class1_alg_type = 0,
2215 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2216 OP_ALG_AAI_HMAC_PRECOMP,
2217 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2218 },
2219 {
2220 .name = "authenc(hmac(sha384),ecb(cipher_null))",
2221 .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
2222 .blocksize = NULL_BLOCK_SIZE,
2223 .type = CRYPTO_ALG_TYPE_AEAD,
2224 .template_aead = {
2225 .setkey = aead_setkey,
2226 .setauthsize = aead_setauthsize,
2227 .encrypt = aead_encrypt,
2228 .decrypt = aead_decrypt,
2229 .givencrypt = aead_null_givencrypt,
2230 .geniv = "<built-in>",
2231 .ivsize = NULL_IV_SIZE,
2232 .maxauthsize = SHA384_DIGEST_SIZE,
2233 },
2234 .class1_alg_type = 0,
2235 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2236 OP_ALG_AAI_HMAC_PRECOMP,
2237 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2238 },
2239 {
2240 .name = "authenc(hmac(sha512),ecb(cipher_null))",
2241 .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
2242 .blocksize = NULL_BLOCK_SIZE,
2243 .type = CRYPTO_ALG_TYPE_AEAD,
2244 .template_aead = {
2245 .setkey = aead_setkey,
2246 .setauthsize = aead_setauthsize,
2247 .encrypt = aead_encrypt,
2248 .decrypt = aead_decrypt,
2249 .givencrypt = aead_null_givencrypt,
2250 .geniv = "<built-in>",
2251 .ivsize = NULL_IV_SIZE,
2252 .maxauthsize = SHA512_DIGEST_SIZE,
2253 },
2254 .class1_alg_type = 0,
2255 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2256 OP_ALG_AAI_HMAC_PRECOMP,
2257 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2258 },
2259 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08002260 .name = "authenc(hmac(md5),cbc(aes))",
2261 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
2262 .blocksize = AES_BLOCK_SIZE,
2263 .type = CRYPTO_ALG_TYPE_AEAD,
2264 .template_aead = {
2265 .setkey = aead_setkey,
2266 .setauthsize = aead_setauthsize,
2267 .encrypt = aead_encrypt,
2268 .decrypt = aead_decrypt,
2269 .givencrypt = aead_givencrypt,
2270 .geniv = "<built-in>",
2271 .ivsize = AES_BLOCK_SIZE,
2272 .maxauthsize = MD5_DIGEST_SIZE,
2273 },
2274 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2275 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2276 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2277 },
2278 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002279 .name = "authenc(hmac(sha1),cbc(aes))",
2280 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
2281 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002282 .type = CRYPTO_ALG_TYPE_AEAD,
2283 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002284 .setkey = aead_setkey,
2285 .setauthsize = aead_setauthsize,
2286 .encrypt = aead_encrypt,
2287 .decrypt = aead_decrypt,
2288 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002289 .geniv = "<built-in>",
2290 .ivsize = AES_BLOCK_SIZE,
2291 .maxauthsize = SHA1_DIGEST_SIZE,
2292 },
2293 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2294 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2295 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2296 },
2297 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002298 .name = "authenc(hmac(sha224),cbc(aes))",
2299 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
2300 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302301 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002302 .template_aead = {
2303 .setkey = aead_setkey,
2304 .setauthsize = aead_setauthsize,
2305 .encrypt = aead_encrypt,
2306 .decrypt = aead_decrypt,
2307 .givencrypt = aead_givencrypt,
2308 .geniv = "<built-in>",
2309 .ivsize = AES_BLOCK_SIZE,
2310 .maxauthsize = SHA224_DIGEST_SIZE,
2311 },
2312 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2313 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2314 OP_ALG_AAI_HMAC_PRECOMP,
2315 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2316 },
2317 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002318 .name = "authenc(hmac(sha256),cbc(aes))",
2319 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
2320 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002321 .type = CRYPTO_ALG_TYPE_AEAD,
2322 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002323 .setkey = aead_setkey,
2324 .setauthsize = aead_setauthsize,
2325 .encrypt = aead_encrypt,
2326 .decrypt = aead_decrypt,
2327 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002328 .geniv = "<built-in>",
2329 .ivsize = AES_BLOCK_SIZE,
2330 .maxauthsize = SHA256_DIGEST_SIZE,
2331 },
2332 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2333 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2334 OP_ALG_AAI_HMAC_PRECOMP,
2335 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2336 },
2337 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002338 .name = "authenc(hmac(sha384),cbc(aes))",
2339 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
2340 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302341 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002342 .template_aead = {
2343 .setkey = aead_setkey,
2344 .setauthsize = aead_setauthsize,
2345 .encrypt = aead_encrypt,
2346 .decrypt = aead_decrypt,
2347 .givencrypt = aead_givencrypt,
2348 .geniv = "<built-in>",
2349 .ivsize = AES_BLOCK_SIZE,
2350 .maxauthsize = SHA384_DIGEST_SIZE,
2351 },
2352 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2353 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2354 OP_ALG_AAI_HMAC_PRECOMP,
2355 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2356 },
2357
2358 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05002359 .name = "authenc(hmac(sha512),cbc(aes))",
2360 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
2361 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002362 .type = CRYPTO_ALG_TYPE_AEAD,
2363 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002364 .setkey = aead_setkey,
2365 .setauthsize = aead_setauthsize,
2366 .encrypt = aead_encrypt,
2367 .decrypt = aead_decrypt,
2368 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05002369 .geniv = "<built-in>",
2370 .ivsize = AES_BLOCK_SIZE,
2371 .maxauthsize = SHA512_DIGEST_SIZE,
2372 },
2373 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2374 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2375 OP_ALG_AAI_HMAC_PRECOMP,
2376 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2377 },
2378 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08002379 .name = "authenc(hmac(md5),cbc(des3_ede))",
2380 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
2381 .blocksize = DES3_EDE_BLOCK_SIZE,
2382 .type = CRYPTO_ALG_TYPE_AEAD,
2383 .template_aead = {
2384 .setkey = aead_setkey,
2385 .setauthsize = aead_setauthsize,
2386 .encrypt = aead_encrypt,
2387 .decrypt = aead_decrypt,
2388 .givencrypt = aead_givencrypt,
2389 .geniv = "<built-in>",
2390 .ivsize = DES3_EDE_BLOCK_SIZE,
2391 .maxauthsize = MD5_DIGEST_SIZE,
2392 },
2393 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2394 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2395 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2396 },
2397 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002398 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2399 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
2400 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002401 .type = CRYPTO_ALG_TYPE_AEAD,
2402 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002403 .setkey = aead_setkey,
2404 .setauthsize = aead_setauthsize,
2405 .encrypt = aead_encrypt,
2406 .decrypt = aead_decrypt,
2407 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002408 .geniv = "<built-in>",
2409 .ivsize = DES3_EDE_BLOCK_SIZE,
2410 .maxauthsize = SHA1_DIGEST_SIZE,
2411 },
2412 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2413 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2414 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2415 },
2416 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002417 .name = "authenc(hmac(sha224),cbc(des3_ede))",
2418 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
2419 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302420 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002421 .template_aead = {
2422 .setkey = aead_setkey,
2423 .setauthsize = aead_setauthsize,
2424 .encrypt = aead_encrypt,
2425 .decrypt = aead_decrypt,
2426 .givencrypt = aead_givencrypt,
2427 .geniv = "<built-in>",
2428 .ivsize = DES3_EDE_BLOCK_SIZE,
2429 .maxauthsize = SHA224_DIGEST_SIZE,
2430 },
2431 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2432 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2433 OP_ALG_AAI_HMAC_PRECOMP,
2434 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2435 },
2436 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002437 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2438 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
2439 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002440 .type = CRYPTO_ALG_TYPE_AEAD,
2441 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002442 .setkey = aead_setkey,
2443 .setauthsize = aead_setauthsize,
2444 .encrypt = aead_encrypt,
2445 .decrypt = aead_decrypt,
2446 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002447 .geniv = "<built-in>",
2448 .ivsize = DES3_EDE_BLOCK_SIZE,
2449 .maxauthsize = SHA256_DIGEST_SIZE,
2450 },
2451 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2452 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2453 OP_ALG_AAI_HMAC_PRECOMP,
2454 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2455 },
2456 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002457 .name = "authenc(hmac(sha384),cbc(des3_ede))",
2458 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
2459 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302460 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002461 .template_aead = {
2462 .setkey = aead_setkey,
2463 .setauthsize = aead_setauthsize,
2464 .encrypt = aead_encrypt,
2465 .decrypt = aead_decrypt,
2466 .givencrypt = aead_givencrypt,
2467 .geniv = "<built-in>",
2468 .ivsize = DES3_EDE_BLOCK_SIZE,
2469 .maxauthsize = SHA384_DIGEST_SIZE,
2470 },
2471 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2472 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2473 OP_ALG_AAI_HMAC_PRECOMP,
2474 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2475 },
2476 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05002477 .name = "authenc(hmac(sha512),cbc(des3_ede))",
2478 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
2479 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002480 .type = CRYPTO_ALG_TYPE_AEAD,
2481 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002482 .setkey = aead_setkey,
2483 .setauthsize = aead_setauthsize,
2484 .encrypt = aead_encrypt,
2485 .decrypt = aead_decrypt,
2486 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05002487 .geniv = "<built-in>",
2488 .ivsize = DES3_EDE_BLOCK_SIZE,
2489 .maxauthsize = SHA512_DIGEST_SIZE,
2490 },
2491 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2492 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2493 OP_ALG_AAI_HMAC_PRECOMP,
2494 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2495 },
2496 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08002497 .name = "authenc(hmac(md5),cbc(des))",
2498 .driver_name = "authenc-hmac-md5-cbc-des-caam",
2499 .blocksize = DES_BLOCK_SIZE,
2500 .type = CRYPTO_ALG_TYPE_AEAD,
2501 .template_aead = {
2502 .setkey = aead_setkey,
2503 .setauthsize = aead_setauthsize,
2504 .encrypt = aead_encrypt,
2505 .decrypt = aead_decrypt,
2506 .givencrypt = aead_givencrypt,
2507 .geniv = "<built-in>",
2508 .ivsize = DES_BLOCK_SIZE,
2509 .maxauthsize = MD5_DIGEST_SIZE,
2510 },
2511 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2512 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2513 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2514 },
2515 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002516 .name = "authenc(hmac(sha1),cbc(des))",
2517 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
2518 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002519 .type = CRYPTO_ALG_TYPE_AEAD,
2520 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002521 .setkey = aead_setkey,
2522 .setauthsize = aead_setauthsize,
2523 .encrypt = aead_encrypt,
2524 .decrypt = aead_decrypt,
2525 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002526 .geniv = "<built-in>",
2527 .ivsize = DES_BLOCK_SIZE,
2528 .maxauthsize = SHA1_DIGEST_SIZE,
2529 },
2530 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2531 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2532 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2533 },
2534 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002535 .name = "authenc(hmac(sha224),cbc(des))",
2536 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
2537 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302538 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002539 .template_aead = {
2540 .setkey = aead_setkey,
2541 .setauthsize = aead_setauthsize,
2542 .encrypt = aead_encrypt,
2543 .decrypt = aead_decrypt,
2544 .givencrypt = aead_givencrypt,
2545 .geniv = "<built-in>",
2546 .ivsize = DES_BLOCK_SIZE,
2547 .maxauthsize = SHA224_DIGEST_SIZE,
2548 },
2549 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2550 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2551 OP_ALG_AAI_HMAC_PRECOMP,
2552 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2553 },
2554 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002555 .name = "authenc(hmac(sha256),cbc(des))",
2556 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
2557 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002558 .type = CRYPTO_ALG_TYPE_AEAD,
2559 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002560 .setkey = aead_setkey,
2561 .setauthsize = aead_setauthsize,
2562 .encrypt = aead_encrypt,
2563 .decrypt = aead_decrypt,
2564 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002565 .geniv = "<built-in>",
2566 .ivsize = DES_BLOCK_SIZE,
2567 .maxauthsize = SHA256_DIGEST_SIZE,
2568 },
2569 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2570 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2571 OP_ALG_AAI_HMAC_PRECOMP,
2572 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2573 },
Kim Phillips4427b1b2011-05-14 22:08:17 -05002574 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002575 .name = "authenc(hmac(sha384),cbc(des))",
2576 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
2577 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302578 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002579 .template_aead = {
2580 .setkey = aead_setkey,
2581 .setauthsize = aead_setauthsize,
2582 .encrypt = aead_encrypt,
2583 .decrypt = aead_decrypt,
2584 .givencrypt = aead_givencrypt,
2585 .geniv = "<built-in>",
2586 .ivsize = DES_BLOCK_SIZE,
2587 .maxauthsize = SHA384_DIGEST_SIZE,
2588 },
2589 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2590 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2591 OP_ALG_AAI_HMAC_PRECOMP,
2592 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2593 },
2594 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05002595 .name = "authenc(hmac(sha512),cbc(des))",
2596 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
2597 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002598 .type = CRYPTO_ALG_TYPE_AEAD,
2599 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002600 .setkey = aead_setkey,
2601 .setauthsize = aead_setauthsize,
2602 .encrypt = aead_encrypt,
2603 .decrypt = aead_decrypt,
2604 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05002605 .geniv = "<built-in>",
2606 .ivsize = DES_BLOCK_SIZE,
2607 .maxauthsize = SHA512_DIGEST_SIZE,
2608 },
2609 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2610 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2611 OP_ALG_AAI_HMAC_PRECOMP,
2612 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2613 },
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002614 /* Galois Counter Mode */
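	/*
	 * gcm(aes) is a single-pass AEAD on the class 1 (AES) CHA, so no
	 * class2_alg_type or alg_op is set and no split key is needed; the
	 * 12-byte ivsize is the standard GCM nonce length.
	 */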
2615 {
2616 .name = "gcm(aes)",
2617 .driver_name = "gcm-aes-caam",
2618 .blocksize = 1,
2619 .type = CRYPTO_ALG_TYPE_AEAD,
2620 .template_aead = {
2621 .setkey = gcm_setkey,
2622 .setauthsize = gcm_setauthsize,
2623 .encrypt = aead_encrypt,
2624 .decrypt = aead_decrypt,
2625 .givencrypt = NULL,
2626 .geniv = "<built-in>",
2627 .ivsize = 12,
2628 .maxauthsize = AES_BLOCK_SIZE,
2629 },
2630 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2631 },
Yuan Kangacdca312011-07-15 11:21:42 +08002632 /* ablkcipher descriptor */
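	/*
	 * Plain block-cipher (ablkcipher) templates: only class1_alg_type
	 * is needed, and "eseqiv" provides the IV generator for givencrypt
	 * requests.
	 */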
2633 {
2634 .name = "cbc(aes)",
2635 .driver_name = "cbc-aes-caam",
2636 .blocksize = AES_BLOCK_SIZE,
2637 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2638 .template_ablkcipher = {
2639 .setkey = ablkcipher_setkey,
2640 .encrypt = ablkcipher_encrypt,
2641 .decrypt = ablkcipher_decrypt,
2642 .geniv = "eseqiv",
2643 .min_keysize = AES_MIN_KEY_SIZE,
2644 .max_keysize = AES_MAX_KEY_SIZE,
2645 .ivsize = AES_BLOCK_SIZE,
2646 },
2647 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2648 },
2649 {
2650 .name = "cbc(des3_ede)",
2651 .driver_name = "cbc-3des-caam",
2652 .blocksize = DES3_EDE_BLOCK_SIZE,
2653 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2654 .template_ablkcipher = {
2655 .setkey = ablkcipher_setkey,
2656 .encrypt = ablkcipher_encrypt,
2657 .decrypt = ablkcipher_decrypt,
2658 .geniv = "eseqiv",
2659 .min_keysize = DES3_EDE_KEY_SIZE,
2660 .max_keysize = DES3_EDE_KEY_SIZE,
2661 .ivsize = DES3_EDE_BLOCK_SIZE,
2662 },
2663 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2664 },
2665 {
2666 .name = "cbc(des)",
2667 .driver_name = "cbc-des-caam",
2668 .blocksize = DES_BLOCK_SIZE,
2669 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2670 .template_ablkcipher = {
2671 .setkey = ablkcipher_setkey,
2672 .encrypt = ablkcipher_encrypt,
2673 .decrypt = ablkcipher_decrypt,
2674 .geniv = "eseqiv",
2675 .min_keysize = DES_KEY_SIZE,
2676 .max_keysize = DES_KEY_SIZE,
2677 .ivsize = DES_BLOCK_SIZE,
2678 },
2679 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2680 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002681};
2682
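/*
 * Driver-private wrapper around a crypto_alg: carries the CAAM descriptor
 * header templates for the algorithm and the list_head used to link it
 * into alg_list at registration time.
 */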
struct caam_crypto_alg {
	struct list_head entry;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};

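/*
 * Per-transform init: allocate a job ring for this tfm and cache the
 * descriptor header templates (class 1/2 algorithm selectors and the
 * split-key alg_op) from the owning caam_crypto_alg in the context.
 */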
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}

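/*
 * Per-transform teardown: unmap whichever shared descriptors and key
 * material this context DMA-mapped, then release the job ring acquired
 * in caam_cra_init().
 */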
static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

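/* Module exit: unregister and free every algorithm added to alg_list */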
static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

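/*
 * Build a crypto_alg from one driver_algs[] template: copy the names,
 * wire up the init/exit callbacks and the per-type (ablkcipher vs. aead)
 * template, and stash the CAAM descriptor header templates for later use
 * by caam_cra_init().
 */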
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}

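/*
 * Module init: locate the CAAM controller node ("fsl,sec-v4.0", falling
 * back to "fsl,sec4.0"), verify the controller driver probed successfully,
 * then register every algorithm in driver_algs[] with the crypto API.
 * Individual allocation/registration failures are logged and skipped.
 */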
2797static int __init caam_algapi_init(void)
2798{
Ruchika Gupta35af6402014-07-07 10:42:12 +05302799 struct device_node *dev_node;
2800 struct platform_device *pdev;
2801 struct device *ctrldev;
2802 void *priv;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002803 int i = 0, err = 0;
2804
Ruchika Gupta35af6402014-07-07 10:42:12 +05302805 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2806 if (!dev_node) {
2807 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2808 if (!dev_node)
2809 return -ENODEV;
2810 }
2811
2812 pdev = of_find_device_by_node(dev_node);
2813 if (!pdev) {
2814 of_node_put(dev_node);
2815 return -ENODEV;
2816 }
2817
2818 ctrldev = &pdev->dev;
2819 priv = dev_get_drvdata(ctrldev);
2820 of_node_put(dev_node);
2821
2822 /*
2823 * If priv is NULL, it's probably because the caam driver wasn't
2824 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2825 */
2826 if (!priv)
2827 return -ENODEV;
2828
2829
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302830 INIT_LIST_HEAD(&alg_list);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002831
2832 /* register crypto algorithms the device supports */
2833 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2834 /* TODO: check if h/w supports alg */
2835 struct caam_crypto_alg *t_alg;
2836
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302837 t_alg = caam_alg_alloc(&driver_algs[i]);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002838 if (IS_ERR(t_alg)) {
2839 err = PTR_ERR(t_alg);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302840 pr_warn("%s alg allocation failed\n",
2841 driver_algs[i].driver_name);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002842 continue;
2843 }
2844
2845 err = crypto_register_alg(&t_alg->crypto_alg);
2846 if (err) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302847 pr_warn("%s alg registration failed\n",
Kim Phillips8e8ec592011-03-13 16:54:26 +08002848 t_alg->crypto_alg.cra_driver_name);
2849 kfree(t_alg);
Horia Geanta246bbed2013-03-20 16:31:58 +02002850 } else
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302851 list_add_tail(&t_alg->entry, &alg_list);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002852 }
Ruchika Guptacfc6f112013-10-25 12:01:03 +05302853 if (!list_empty(&alg_list))
2854 pr_info("caam algorithms registered in /proc/crypto\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08002855
2856 return err;
2857}
2858
module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");