/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
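/*
 * Note: the algorithms are registered at a fixed, relatively high crypto
 * API priority, so the crypto API prefers these h/w implementations over
 * lower-priority s/w fallbacks when both are available.
 */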
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
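/*
 * DESC_MAX_USED_BYTES is the worst case: the largest shared descriptor
 * (givencrypt) plus a maximally-sized inlined key; it sizes the
 * per-session sh_desc_* buffers in struct caam_ctx below.
 */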

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

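/*
 * AES decrypt needs two OPERATION variants: if the shared descriptor was
 * already run by an earlier job (SHRD set), the class 1 key register is
 * presumed to already hold the decryption key schedule, so the DK
 * (Decrypt Key) bit is set; otherwise a plain decrypt operation is
 * issued. The jump pair below selects between the two at run time.
 */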
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
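	/* VARSEQINLEN = VARSEQOUTLEN = SEQINLEN: process the whole payload */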
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

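/*
 * ctx->key layout: the MDHA split (authentication) key, padded out to
 * split_key_pad_len, immediately followed by the class 1 (encryption)
 * key. The same layout is used whether the keys are inlined as immediate
 * data or referenced by DMA address.
 */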
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

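/*
 * Shared descriptor prologue: with a SERIAL share, the key commands only
 * need to run the first time the descriptor executes, so they sit behind
 * a jump that is taken once the descriptor is already shared.
 */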
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);
}

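/*
 * Build shared descriptors for authentication-only ("null encryption")
 * AEAD: the class 2 (MDHA) operation computes or checks the ICV while the
 * payload is copied from input to output via the self-patched MOVE
 * commands below.
 */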
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/*
	 * NULL encryption; IV is zero
	 * assoclen = (assoclen + cryptlen) - cryptlen
	 */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

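/*
 * Build the encrypt, decrypt and givencrypt shared descriptors for an
 * authenc AEAD transform. For each one, keys are inlined as immediate
 * data when job + shared descriptor fit in the 64-word descriptor
 * buffer, and referenced by DMA address otherwise.
 */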
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV from class 1 context to the output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

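/*
 * Generate the MDHA split key for the authentication algorithm: the
 * h/w-derived ipad/opad hash states that let the descriptors skip the
 * per-packet HMAC key hashing (see key_gen.c).
 */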
static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret)
		goto badkey;

	/* append encryption key to the end of the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

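/*
 * ablkcipher uses a single raw class 1 key; both the encrypt and decrypt
 * shared descriptors are built here, since there is no separate
 * setauthsize step as in the aead case.
 */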
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

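/*
 * Undo the DMA mappings made at request time: the src/dst scatterlists,
 * the IV, and the sec4 link table, if one was used.
 */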
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

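/*
 * Job ring completion callbacks: recover the extended descriptor from
 * the h/w descriptor address, report any h/w status, unmap DMA and
 * complete the crypto API request.
 */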
static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen - ctx->authsize, 1);
#endif

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
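/*
 * The job descriptor is a header plus SEQ IN/OUT PTR commands; depending
 * on whether the request data turned out to be contiguous, the pointers
 * reference the data directly or point into the sec4 S/G link table
 * built during edesc allocation.
 */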
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}

/*
 * Fill in aead givencrypt job descriptor
 */
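/*
 * Same layout as init_aead_job(), except that output begins with the
 * descriptor-generated IV, so ivsize + cryptlen + authsize bytes are
 * written out.
 */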
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
			   out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
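/*
 * As with the aead variants: SEQ IN covers the IV ahead of the payload,
 * and either direct pointers or the sec4 link table are used depending
 * on IV/data contiguity.
 */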
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * allocate and map the aead extended descriptor
 */
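/*
 * The extended descriptor bundles the h/w job descriptor with the DMA
 * state (mapped scatterlists, IV, link table) that the completion
 * callback needs in order to unmap everything.
 */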
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
		dst_nents = sg_count(req->dst,
				     req->cryptlen +
				     (encrypt ? authsize : (-authsize)),
				     &dst_chained);
	} else {
		src_nents = sg_count(req->src,
				     req->cryptlen +
				     (encrypt ? authsize : 0),
				     &src_chained);
	}

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Check if data are contiguous */
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08001356 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001357 edesc->sec4_sg_bytes = sec4_sg_bytes;
1358 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1359 desc_bytes;
Yuan Kang1acebad2011-07-15 11:21:42 +08001360 *all_contig_ptr = all_contig;
1361
Yuan Kanga299c832012-06-22 19:48:46 -05001362 sec4_sg_index = 0;
Yuan Kang1acebad2011-07-15 11:21:42 +08001363 if (!all_contig) {
Yuan Kanga299c832012-06-22 19:48:46 -05001364 sg_to_sec4_sg(req->assoc,
1365 (assoc_nents ? : 1),
1366 edesc->sec4_sg +
1367 sec4_sg_index, 0);
1368 sec4_sg_index += assoc_nents ? : 1;
1369 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08001370 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05001371 sec4_sg_index += 1;
1372 sg_to_sec4_sg_last(req->src,
1373 (src_nents ? : 1),
1374 edesc->sec4_sg +
1375 sec4_sg_index, 0);
1376 sec4_sg_index += src_nents ? : 1;
Yuan Kang1acebad2011-07-15 11:21:42 +08001377 }
1378 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05001379 sg_to_sec4_sg_last(req->dst, dst_nents,
1380 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad2011-07-15 11:21:42 +08001381 }
Ruchika Gupta1da2be32014-06-23 19:50:26 +05301382 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1383 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03001384 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1385 dev_err(jrdev, "unable to map S/G table\n");
1386 return ERR_PTR(-ENOMEM);
1387 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08001388
1389 return edesc;
1390}
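
/*
 * Resulting single allocation, laid out back to back (illustrative):
 *
 *	+---------------------+  <- edesc
 *	| struct aead_edesc   |
 *	+---------------------+  <- edesc->hw_desc
 *	| job descriptor      |  (desc_bytes)
 *	+---------------------+  <- edesc->sec4_sg
 *	| sec4 S/G entries    |  (sec4_sg_bytes)
 *	+---------------------+
 *
 * Keeping all three in one GFP_DMA buffer means a single kmalloc()/kfree()
 * per request, and lets the whole S/G table be mapped with one
 * dma_map_single() call.
 */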

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
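
/*
 * All the request entry points in this file follow the asynchronous pattern
 * seen in aead_encrypt() above: caam_jr_enqueue() returning 0 means the job
 * ring now owns the descriptor and the crypto API is told -EINPROGRESS, with
 * the *_done callback unmapping and freeing the edesc on completion.  Only
 * when the enqueue itself fails does the submitter unmap and free directly.
 */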

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
				     &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Check if data are contiguous */
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
		contig &= ~GIV_SRC_CONTIG;
	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;
	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		sec4_sg_len += 1;
	}
	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*contig_ptr = contig;

	sec4_sg_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		sg_to_sec4_sg(req->assoc, assoc_nents,
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}
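
/*
 * In the givencrypt case the generated IV (greq->giv) must follow the
 * associated data on the input side and precede the ciphertext on the
 * output side, so source and destination contiguity are tracked separately
 * through GIV_SRC_CONTIG and GIV_DST_CONTIG above; whichever side is not
 * physically contiguous gets its own IV entry in the S/G table.
 */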

static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);

	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
{
	return aead_encrypt(&areq->areq);
}
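
/*
 * aead_null_givencrypt() can simply delegate: the cipher_null templates
 * below advertise ivsize = NULL_IV_SIZE (i.e. 0), so there is no IV to
 * generate and a plain encrypt does the whole job.
 */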

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
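
/*
 * S/G table layout produced above when the IV is not contiguous with the
 * data (illustrative):
 *
 *	index 0			: IV entry (dma_to_sec4_sg_one)
 *	index 1 .. src_nents	: source buffers, last entry flagged
 *	index 1 + src_nents ..	: destination buffers, only when
 *				  req->dst != req->src
 *
 * init_ablkcipher_job() relies on exactly this ordering when computing
 * offsets into sec4_sg_dma.
 */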

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
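
/*
 * Caller's view (illustrative sketch, not part of this driver): once the
 * "cbc(aes)" template below is registered, these entry points are reached
 * through the generic ablkcipher API, roughly:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req =
 *		ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *	ablkcipher_request_set_callback(req, 0, my_complete, my_ctx);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req);
 *
 * which dispatches to ablkcipher_encrypt() above and typically returns
 * -EINPROGRESS.  key, src_sg/dst_sg, iv and my_complete/my_ctx are
 * hypothetical caller-side names; error handling is omitted for brevity.
 */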

#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};
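
/*
 * Each template below is turned into a live crypto_alg by caam_alg_alloc()
 * at module init.  The class1_alg_type/class2_alg_type words select the CHA
 * algorithm and AAI mode (e.g. OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC) that
 * caam_cra_init() folds into the per-tfm context for shared descriptor
 * construction; alg_op carries the raw HMAC operation used elsewhere in
 * this driver when deriving the split key.
 */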

static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(md5),ecb(cipher_null))",
		.driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(aes))",
		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(aes))",
		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(aes))",
		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des))",
		.driver_name = "authenc-hmac-md5-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des))",
		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des))",
		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	}
};

struct caam_crypto_alg {
	struct list_head entry;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}
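
/*
 * Each transform gets its own job ring reference in caam_cra_init() above;
 * the matching caam_jr_free() happens in caam_cra_exit() below, after the
 * shared descriptors and key material have been unmapped.
 */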

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}

static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	void *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(&driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &alg_list);
	}
	if (!list_empty(&alg_list))
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}
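
/*
 * Registration above is deliberately best-effort: a template whose
 * allocation or crypto_register_alg() fails is merely logged and skipped so
 * the remaining algorithms can still register.  Note the return value is
 * simply the last err observed, so earlier failures do not by themselves
 * fail module init.
 */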

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");