/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 *                 ---------------                     ---------------
 *                 | JobDesc #1  |-------------------->|  ShareDesc  |
 *                 | *(packet 1) |                     |   (PDB)     |
 *                 ---------------      |------------->|  (hashKey)  |
 *                       .              |              | (cipherKey) |
 *                       .              |    |-------->| (operation) |
 *                 ---------------      |    |         ---------------
 *                 | JobDesc #2  |------|    |
 *                 | *(packet 2) |           |
 *                 ---------------           |
 *                       .                   |
 *                       .                   |
 *                 ---------------           |
 *                 | JobDesc #3  |------------
 *                 | *(packet 3) |
 *                 ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
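
/*
 * Illustrative sketch only (not part of the driver flow): a per-request
 * job descriptor referencing a session's pre-built shared descriptor is
 * assembled roughly as
 *
 *	init_job_desc_shared(desc, ctx->sh_desc_enc_dma,
 *			     desc_len(ctx->sh_desc_enc), HDR_SHARE_DEFER);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *
 * The exact header flags and in/out options vary per request type; see
 * the init_*_job helpers further down in this file.
 */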

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 19 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
#define DESC_RFC4106_GIVENC_LEN		(DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

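/*
 * Worst-case descriptor footprint: a givencrypt shared descriptor with
 * both keys inlined. DESC_MAX_USED_LEN sizes the sh_desc_* buffers in
 * struct caam_ctx below.
 */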
#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
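/* list of registered algs, populated at module init time */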
static struct list_head alg_list;

/*
 * Set the DK (decrypt key) bit in the class 1 operation if shared.
 * Whether the keys were already loaded by a sharing job is only known
 * at run time, so both operation variants are emitted and a jump on the
 * SHRD condition selects between them.
 */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

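/*
 * Load the split authentication key (class 2) and the encryption key
 * (class 1), either inlined as immediate data when they fit in the
 * descriptor or by reference to the DMA-mapped copy at ctx->key_dma.
 */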
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/*
	 * NULL encryption; IV is zero
	 * assoclen = (assoclen + cryptlen) - cryptlen
	 */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

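	/*
	 * With a NULL cipher no engine moves the payload to the output, so
	 * the self-patched MOVE commands built below shuttle it from the
	 * input FIFO to the output FIFO while the class 2 MAC reads it.
	 */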
	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy IV from class 1 context to OFIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

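/*
 * The ICV length is baked into the shared descriptors (both the MATH
 * commands and the final sequence store/load), so any authsize change
 * must rebuild them.
 */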
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

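	/*
	 * GCM must also work with an empty payload and/or empty assoc data,
	 * so the descriptors below test cryptlen and assoclen at run time
	 * and jump around the sequence FIFO loads/stores that would have
	 * nothing to transfer.
	 */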
	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);
	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 7);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* if assoclen is ZERO, jump to IV reading - the IV is the only input data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* jump to ICV writing */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* read the IV - it is the only input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
			     FIFOLD_TYPE_LAST1);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);

	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* jump to zero-payload command if cryptlen is zero */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	/* if assoclen is ZERO, skip reading assoc data */
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 4);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* if assoclen is ZERO, jump to ICV reading */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
	u32 *desc;
	u32 geniv;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

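	/*
	 * The last 4 bytes of the key material are the RFC4106 nonce salt
	 * (stripped from enckeylen in rfc4106_setkey); each descriptor
	 * feeds that salt into the class 1 IV ahead of the per-packet IV
	 * taken from the request.
	 */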
	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* Read Salt */
	append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
				4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
	/* Read AES-GCM-ESP IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Read Salt */
	append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
				4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
	/* Read AES-GCM-ESP IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* rfc4106_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
			       (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV to OFIFO */
	write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);

	/* Read Salt and generated IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
		   FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, move_cmd);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by generated IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Store generated IV and encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

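/*
 * The MDHA "split key" is the HMAC ipad/opad pair the hardware derives
 * from the raw authentication key; generation is delegated to key_gen.c,
 * which runs it as its own CAAM job.
 */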
static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

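/*
 * ctx->key layout for authenc algorithms: the padded MDHA split key,
 * followed at split_key_pad_len by the raw encryption key. One DMA
 * mapping covers both halves, and the shared descriptors reference them
 * at fixed offsets from ctx->key_dma.
 */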
static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret)
		goto badkey;

	/* append encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
1440struct ablkcipher_edesc {
1441 int src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001442 bool src_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08001443 int dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001444 bool dst_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08001445 dma_addr_t iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001446 int sec4_sg_bytes;
1447 dma_addr_t sec4_sg_dma;
1448 struct sec4_sg_entry *sec4_sg;
Yuan Kangacdca312011-07-15 11:21:42 +08001449 u32 hw_desc[0];
1450};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
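
/*
 * Job completion callbacks, invoked from job ring interrupt context:
 * they recover the edesc from the descriptor address (hw_desc is
 * embedded in the edesc), report any h/w status via caam_jr_strstatus(),
 * unmap DMA, free the edesc and complete the crypto API request.
 */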

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen - ctx->authsize, 1);
#endif

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify that the h/w ICV (auth) check passed; if it failed,
	 * return -EBADMSG to the caller
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
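/*
 * The job descriptor references the shared descriptor (deferred,
 * reverse-execution header) and appends the SEQ IN/OUT pointers. The
 * input sequence spans assoclen + ivsize + cryptlen bytes, ordered
 * IV, AAD, text for GCM and AAD, IV, text otherwise, supplied either
 * directly (all_contig) or through the sec4 link table (LDST_SGF).
 */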
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	bool is_gcm = false;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		if (is_gcm)
			src_dma = edesc->iv_dma;
		else
			src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}

/*
 * Fill in aead givencrypt job descriptor
 */
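/*
 * contig carries the GIV_SRC_CONTIG/GIV_DST_CONTIG bits computed in
 * aead_giv_edesc_alloc(): a cleared bit means the corresponding
 * sequence pointer must go through the sec4 link table rather than
 * point at physically contiguous data.
 */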
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	bool is_gcm = false;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		if (is_gcm)
			src_dma = edesc->iv_dma;
		else
			src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  (edesc->assoc_nents +
				   (is_gcm ? 1 + edesc->src_nents : 0));
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
			   out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
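/*
 * The input sequence is the IV followed by req->nbytes of text; when
 * the IV is not physically contiguous with the source data, both are
 * described through the sec4 link table instead.
 */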
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * allocate and map the aead extended descriptor
 */
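/*
 * Returns an edesc with source, destination, associated data and IV
 * DMA-mapped and the sec4 link table populated, or ERR_PTR(-ENOMEM);
 * *all_contig_ptr reports whether the whole input sequence is
 * physically contiguous. Note that the early-error paths below return
 * without unwinding the mappings made up to that point.
 */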
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;
	bool is_gcm = false;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
		dst_nents = sg_count(req->dst,
				     req->cryptlen +
					(encrypt ? authsize : (-authsize)),
				     &dst_chained);
	} else {
		src_nents = sg_count(req->src,
				     req->cryptlen +
					(encrypt ? authsize : 0),
				     &src_chained);
	}

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	/*
	 * Check if data are contiguous.
	 * GCM expected input sequence: IV, AAD, text
	 * All other - expected input sequence: AAD, IV, text
	 */
	if (is_gcm)
		all_contig = (!assoc_nents &&
			      iv_dma + ivsize == sg_dma_address(req->assoc) &&
			      !src_nents && sg_dma_address(req->assoc) +
			      req->assoclen == sg_dma_address(req->src));
	else
		all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
			      req->assoclen == iv_dma && !src_nents &&
			      iv_dma + ivsize == sg_dma_address(req->src));
	if (!all_contig) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		if (!is_gcm) {
			sg_to_sec4_sg(req->assoc,
				      (assoc_nents ? : 1),
				      edesc->sec4_sg +
				      sec4_sg_index, 0);
			sec4_sg_index += assoc_nents ? : 1;
		}

		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;

		if (is_gcm) {
			sg_to_sec4_sg(req->assoc,
				      (assoc_nents ? : 1),
				      edesc->sec4_sg +
				      sec4_sg_index, 0);
			sec4_sg_index += assoc_nents ? : 1;
		}

		sg_to_sec4_sg_last(req->src,
				   (src_nents ? : 1),
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}
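
/*
 * Request flow for the AEAD ops below:
 *	aead_{en,de}crypt()
 *	  -> aead_edesc_alloc()	map the request, build the link table
 *	  -> init_aead_job()	write the job descriptor
 *	  -> caam_jr_enqueue()	submit; returns -EINPROGRESS on success,
 *	     otherwise the edesc is unmapped and freed synchronously
 */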

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
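/*
 * Like aead_edesc_alloc(), but the IV to be generated (greq->giv) is
 * mapped instead of req->iv, and extra link table entries are reserved
 * so the GCM output sequence (IV, encrypted text) can be described
 * when source and destination overlap non-contiguously.
 */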
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	bool is_gcm = false;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
				     &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	/*
	 * Check if data are contiguous.
	 * GCM expected input sequence: IV, AAD, text
	 * All other - expected input sequence: AAD, IV, text
	 */

	if (is_gcm) {
		if (assoc_nents || iv_dma + ivsize !=
		    sg_dma_address(req->assoc) || src_nents ||
		    sg_dma_address(req->assoc) + req->assoclen !=
		    sg_dma_address(req->src))
			contig &= ~GIV_SRC_CONTIG;
	} else {
		if (assoc_nents ||
		    sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
		    src_nents || iv_dma + ivsize != sg_dma_address(req->src))
			contig &= ~GIV_SRC_CONTIG;
	}

	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;

	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}

	/*
	 * Add new sg entries for GCM output sequence.
	 * Expected output sequence: IV, encrypted text.
	 */
	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
		sec4_sg_len += 1 + src_nents;

	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		sec4_sg_len += 1 + dst_nents;
	}

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*contig_ptr = contig;

	sec4_sg_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		if (!is_gcm) {
			sg_to_sec4_sg(req->assoc, assoc_nents,
				      edesc->sec4_sg + sec4_sg_index, 0);
			sec4_sg_index += assoc_nents;
		}

		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;

		if (is_gcm) {
			sg_to_sec4_sg(req->assoc, assoc_nents,
				      edesc->sec4_sg + sec4_sg_index, 0);
			sec4_sg_index += assoc_nents;
		}

		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}

	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}

static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);

	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
{
	return aead_encrypt(&areq->areq);
}
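
/*
 * (cipher_null AEADs have no IV to generate - ivsize is NULL_IV_SIZE -
 * so givencrypt above simply falls through to aead_encrypt().)
 */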

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};

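/*
 * Each template selects the CAAM class 1 (cipher) and class 2
 * (authentication) OP_ALG words used when constructing the shared
 * descriptors; alg_op names the MDHA algorithm used for split-key
 * generation in the setkey path earlier in this file.
 */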
2521static struct caam_alg_template driver_algs[] = {
Horia Geanta246bbed2013-03-20 16:31:58 +02002522 /* single-pass ipsec_esp descriptor */
Kim Phillips8e8ec592011-03-13 16:54:26 +08002523 {
Horia Geantaae4a8252014-03-14 17:46:52 +02002524 .name = "authenc(hmac(md5),ecb(cipher_null))",
2525 .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
2526 .blocksize = NULL_BLOCK_SIZE,
2527 .type = CRYPTO_ALG_TYPE_AEAD,
2528 .template_aead = {
2529 .setkey = aead_setkey,
2530 .setauthsize = aead_setauthsize,
2531 .encrypt = aead_encrypt,
2532 .decrypt = aead_decrypt,
2533 .givencrypt = aead_null_givencrypt,
2534 .geniv = "<built-in>",
2535 .ivsize = NULL_IV_SIZE,
2536 .maxauthsize = MD5_DIGEST_SIZE,
2537 },
2538 .class1_alg_type = 0,
2539 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2540 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2541 },
2542 {
2543 .name = "authenc(hmac(sha1),ecb(cipher_null))",
2544 .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
2545 .blocksize = NULL_BLOCK_SIZE,
2546 .type = CRYPTO_ALG_TYPE_AEAD,
2547 .template_aead = {
2548 .setkey = aead_setkey,
2549 .setauthsize = aead_setauthsize,
2550 .encrypt = aead_encrypt,
2551 .decrypt = aead_decrypt,
2552 .givencrypt = aead_null_givencrypt,
2553 .geniv = "<built-in>",
2554 .ivsize = NULL_IV_SIZE,
2555 .maxauthsize = SHA1_DIGEST_SIZE,
2556 },
2557 .class1_alg_type = 0,
2558 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2559 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2560 },
2561 {
2562 .name = "authenc(hmac(sha224),ecb(cipher_null))",
2563 .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
2564 .blocksize = NULL_BLOCK_SIZE,
2565 .type = CRYPTO_ALG_TYPE_AEAD,
2566 .template_aead = {
2567 .setkey = aead_setkey,
2568 .setauthsize = aead_setauthsize,
2569 .encrypt = aead_encrypt,
2570 .decrypt = aead_decrypt,
2571 .givencrypt = aead_null_givencrypt,
2572 .geniv = "<built-in>",
2573 .ivsize = NULL_IV_SIZE,
2574 .maxauthsize = SHA224_DIGEST_SIZE,
2575 },
2576 .class1_alg_type = 0,
2577 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2578 OP_ALG_AAI_HMAC_PRECOMP,
2579 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2580 },
2581 {
2582 .name = "authenc(hmac(sha256),ecb(cipher_null))",
2583 .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
2584 .blocksize = NULL_BLOCK_SIZE,
2585 .type = CRYPTO_ALG_TYPE_AEAD,
2586 .template_aead = {
2587 .setkey = aead_setkey,
2588 .setauthsize = aead_setauthsize,
2589 .encrypt = aead_encrypt,
2590 .decrypt = aead_decrypt,
2591 .givencrypt = aead_null_givencrypt,
2592 .geniv = "<built-in>",
2593 .ivsize = NULL_IV_SIZE,
2594 .maxauthsize = SHA256_DIGEST_SIZE,
2595 },
2596 .class1_alg_type = 0,
2597 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2598 OP_ALG_AAI_HMAC_PRECOMP,
2599 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2600 },
2601 {
2602 .name = "authenc(hmac(sha384),ecb(cipher_null))",
2603 .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
2604 .blocksize = NULL_BLOCK_SIZE,
2605 .type = CRYPTO_ALG_TYPE_AEAD,
2606 .template_aead = {
2607 .setkey = aead_setkey,
2608 .setauthsize = aead_setauthsize,
2609 .encrypt = aead_encrypt,
2610 .decrypt = aead_decrypt,
2611 .givencrypt = aead_null_givencrypt,
2612 .geniv = "<built-in>",
2613 .ivsize = NULL_IV_SIZE,
2614 .maxauthsize = SHA384_DIGEST_SIZE,
2615 },
2616 .class1_alg_type = 0,
2617 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2618 OP_ALG_AAI_HMAC_PRECOMP,
2619 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2620 },
2621 {
2622 .name = "authenc(hmac(sha512),ecb(cipher_null))",
2623 .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
2624 .blocksize = NULL_BLOCK_SIZE,
2625 .type = CRYPTO_ALG_TYPE_AEAD,
2626 .template_aead = {
2627 .setkey = aead_setkey,
2628 .setauthsize = aead_setauthsize,
2629 .encrypt = aead_encrypt,
2630 .decrypt = aead_decrypt,
2631 .givencrypt = aead_null_givencrypt,
2632 .geniv = "<built-in>",
2633 .ivsize = NULL_IV_SIZE,
2634 .maxauthsize = SHA512_DIGEST_SIZE,
2635 },
2636 .class1_alg_type = 0,
2637 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2638 OP_ALG_AAI_HMAC_PRECOMP,
2639 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2640 },
2641 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08002642 .name = "authenc(hmac(md5),cbc(aes))",
2643 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
2644 .blocksize = AES_BLOCK_SIZE,
2645 .type = CRYPTO_ALG_TYPE_AEAD,
2646 .template_aead = {
2647 .setkey = aead_setkey,
2648 .setauthsize = aead_setauthsize,
2649 .encrypt = aead_encrypt,
2650 .decrypt = aead_decrypt,
2651 .givencrypt = aead_givencrypt,
2652 .geniv = "<built-in>",
2653 .ivsize = AES_BLOCK_SIZE,
2654 .maxauthsize = MD5_DIGEST_SIZE,
2655 },
2656 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2657 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2658 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2659 },
2660 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002661 .name = "authenc(hmac(sha1),cbc(aes))",
2662 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
2663 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002664 .type = CRYPTO_ALG_TYPE_AEAD,
2665 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002666 .setkey = aead_setkey,
2667 .setauthsize = aead_setauthsize,
2668 .encrypt = aead_encrypt,
2669 .decrypt = aead_decrypt,
2670 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002671 .geniv = "<built-in>",
2672 .ivsize = AES_BLOCK_SIZE,
2673 .maxauthsize = SHA1_DIGEST_SIZE,
2674 },
2675 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2676 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2677 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2678 },
2679 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002680 .name = "authenc(hmac(sha224),cbc(aes))",
2681 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
2682 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302683 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002684 .template_aead = {
2685 .setkey = aead_setkey,
2686 .setauthsize = aead_setauthsize,
2687 .encrypt = aead_encrypt,
2688 .decrypt = aead_decrypt,
2689 .givencrypt = aead_givencrypt,
2690 .geniv = "<built-in>",
2691 .ivsize = AES_BLOCK_SIZE,
2692 .maxauthsize = SHA224_DIGEST_SIZE,
2693 },
2694 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2695 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2696 OP_ALG_AAI_HMAC_PRECOMP,
2697 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2698 },
2699 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002700 .name = "authenc(hmac(sha256),cbc(aes))",
2701 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
2702 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002703 .type = CRYPTO_ALG_TYPE_AEAD,
2704 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002705 .setkey = aead_setkey,
2706 .setauthsize = aead_setauthsize,
2707 .encrypt = aead_encrypt,
2708 .decrypt = aead_decrypt,
2709 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002710 .geniv = "<built-in>",
2711 .ivsize = AES_BLOCK_SIZE,
2712 .maxauthsize = SHA256_DIGEST_SIZE,
2713 },
2714 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2715 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2716 OP_ALG_AAI_HMAC_PRECOMP,
2717 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2718 },
2719 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002720 .name = "authenc(hmac(sha384),cbc(aes))",
2721 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
2722 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302723 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002724 .template_aead = {
2725 .setkey = aead_setkey,
2726 .setauthsize = aead_setauthsize,
2727 .encrypt = aead_encrypt,
2728 .decrypt = aead_decrypt,
2729 .givencrypt = aead_givencrypt,
2730 .geniv = "<built-in>",
2731 .ivsize = AES_BLOCK_SIZE,
2732 .maxauthsize = SHA384_DIGEST_SIZE,
2733 },
2734 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2735 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2736 OP_ALG_AAI_HMAC_PRECOMP,
2737 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2738 },
2739
2740 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05002741 .name = "authenc(hmac(sha512),cbc(aes))",
2742 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
2743 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002744 .type = CRYPTO_ALG_TYPE_AEAD,
2745 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002746 .setkey = aead_setkey,
2747 .setauthsize = aead_setauthsize,
2748 .encrypt = aead_encrypt,
2749 .decrypt = aead_decrypt,
2750 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05002751 .geniv = "<built-in>",
2752 .ivsize = AES_BLOCK_SIZE,
2753 .maxauthsize = SHA512_DIGEST_SIZE,
2754 },
2755 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2756 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2757 OP_ALG_AAI_HMAC_PRECOMP,
2758 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2759 },
2760 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08002761 .name = "authenc(hmac(md5),cbc(des3_ede))",
2762 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
2763 .blocksize = DES3_EDE_BLOCK_SIZE,
2764 .type = CRYPTO_ALG_TYPE_AEAD,
2765 .template_aead = {
2766 .setkey = aead_setkey,
2767 .setauthsize = aead_setauthsize,
2768 .encrypt = aead_encrypt,
2769 .decrypt = aead_decrypt,
2770 .givencrypt = aead_givencrypt,
2771 .geniv = "<built-in>",
2772 .ivsize = DES3_EDE_BLOCK_SIZE,
2773 .maxauthsize = MD5_DIGEST_SIZE,
2774 },
2775 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2776 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2777 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2778 },
2779 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002780 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2781 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
2782 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002783 .type = CRYPTO_ALG_TYPE_AEAD,
2784 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002785 .setkey = aead_setkey,
2786 .setauthsize = aead_setauthsize,
2787 .encrypt = aead_encrypt,
2788 .decrypt = aead_decrypt,
2789 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002790 .geniv = "<built-in>",
2791 .ivsize = DES3_EDE_BLOCK_SIZE,
2792 .maxauthsize = SHA1_DIGEST_SIZE,
2793 },
2794 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2795 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2796 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2797 },
2798 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002799 .name = "authenc(hmac(sha224),cbc(des3_ede))",
2800 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
2801 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302802 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002803 .template_aead = {
2804 .setkey = aead_setkey,
2805 .setauthsize = aead_setauthsize,
2806 .encrypt = aead_encrypt,
2807 .decrypt = aead_decrypt,
2808 .givencrypt = aead_givencrypt,
2809 .geniv = "<built-in>",
2810 .ivsize = DES3_EDE_BLOCK_SIZE,
2811 .maxauthsize = SHA224_DIGEST_SIZE,
2812 },
2813 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2814 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2815 OP_ALG_AAI_HMAC_PRECOMP,
2816 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2817 },
2818 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002819 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2820 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
2821 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002822 .type = CRYPTO_ALG_TYPE_AEAD,
2823 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002824 .setkey = aead_setkey,
2825 .setauthsize = aead_setauthsize,
2826 .encrypt = aead_encrypt,
2827 .decrypt = aead_decrypt,
2828 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002829 .geniv = "<built-in>",
2830 .ivsize = DES3_EDE_BLOCK_SIZE,
2831 .maxauthsize = SHA256_DIGEST_SIZE,
2832 },
2833 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2834 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2835 OP_ALG_AAI_HMAC_PRECOMP,
2836 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2837 },
2838 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002839 .name = "authenc(hmac(sha384),cbc(des3_ede))",
2840 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
2841 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302842 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002843 .template_aead = {
2844 .setkey = aead_setkey,
2845 .setauthsize = aead_setauthsize,
2846 .encrypt = aead_encrypt,
2847 .decrypt = aead_decrypt,
2848 .givencrypt = aead_givencrypt,
2849 .geniv = "<built-in>",
2850 .ivsize = DES3_EDE_BLOCK_SIZE,
2851 .maxauthsize = SHA384_DIGEST_SIZE,
2852 },
2853 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2854 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2855 OP_ALG_AAI_HMAC_PRECOMP,
2856 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2857 },
2858 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05002859 .name = "authenc(hmac(sha512),cbc(des3_ede))",
2860 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
2861 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002862 .type = CRYPTO_ALG_TYPE_AEAD,
2863 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002864 .setkey = aead_setkey,
2865 .setauthsize = aead_setauthsize,
2866 .encrypt = aead_encrypt,
2867 .decrypt = aead_decrypt,
2868 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05002869 .geniv = "<built-in>",
2870 .ivsize = DES3_EDE_BLOCK_SIZE,
2871 .maxauthsize = SHA512_DIGEST_SIZE,
2872 },
2873 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2874 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2875 OP_ALG_AAI_HMAC_PRECOMP,
2876 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2877 },
2878 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08002879 .name = "authenc(hmac(md5),cbc(des))",
2880 .driver_name = "authenc-hmac-md5-cbc-des-caam",
2881 .blocksize = DES_BLOCK_SIZE,
2882 .type = CRYPTO_ALG_TYPE_AEAD,
2883 .template_aead = {
2884 .setkey = aead_setkey,
2885 .setauthsize = aead_setauthsize,
2886 .encrypt = aead_encrypt,
2887 .decrypt = aead_decrypt,
2888 .givencrypt = aead_givencrypt,
2889 .geniv = "<built-in>",
2890 .ivsize = DES_BLOCK_SIZE,
2891 .maxauthsize = MD5_DIGEST_SIZE,
2892 },
2893 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2894 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
2895 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
2896 },
2897 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002898 .name = "authenc(hmac(sha1),cbc(des))",
2899 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
2900 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002901 .type = CRYPTO_ALG_TYPE_AEAD,
2902 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002903 .setkey = aead_setkey,
2904 .setauthsize = aead_setauthsize,
2905 .encrypt = aead_encrypt,
2906 .decrypt = aead_decrypt,
2907 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002908 .geniv = "<built-in>",
2909 .ivsize = DES_BLOCK_SIZE,
2910 .maxauthsize = SHA1_DIGEST_SIZE,
2911 },
2912 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2913 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
2914 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
2915 },
2916 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002917 .name = "authenc(hmac(sha224),cbc(des))",
2918 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
2919 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302920 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002921 .template_aead = {
2922 .setkey = aead_setkey,
2923 .setauthsize = aead_setauthsize,
2924 .encrypt = aead_encrypt,
2925 .decrypt = aead_decrypt,
2926 .givencrypt = aead_givencrypt,
2927 .geniv = "<built-in>",
2928 .ivsize = DES_BLOCK_SIZE,
2929 .maxauthsize = SHA224_DIGEST_SIZE,
2930 },
2931 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2932 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2933 OP_ALG_AAI_HMAC_PRECOMP,
2934 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
2935 },
2936 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002937 .name = "authenc(hmac(sha256),cbc(des))",
2938 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
2939 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002940 .type = CRYPTO_ALG_TYPE_AEAD,
2941 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002942 .setkey = aead_setkey,
2943 .setauthsize = aead_setauthsize,
2944 .encrypt = aead_encrypt,
2945 .decrypt = aead_decrypt,
2946 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002947 .geniv = "<built-in>",
2948 .ivsize = DES_BLOCK_SIZE,
2949 .maxauthsize = SHA256_DIGEST_SIZE,
2950 },
2951 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2952 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2953 OP_ALG_AAI_HMAC_PRECOMP,
2954 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
2955 },
Kim Phillips4427b1b2011-05-14 22:08:17 -05002956 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002957 .name = "authenc(hmac(sha384),cbc(des))",
2958 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
2959 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05302960 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06002961 .template_aead = {
2962 .setkey = aead_setkey,
2963 .setauthsize = aead_setauthsize,
2964 .encrypt = aead_encrypt,
2965 .decrypt = aead_decrypt,
2966 .givencrypt = aead_givencrypt,
2967 .geniv = "<built-in>",
2968 .ivsize = DES_BLOCK_SIZE,
2969 .maxauthsize = SHA384_DIGEST_SIZE,
2970 },
2971 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2972 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2973 OP_ALG_AAI_HMAC_PRECOMP,
2974 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
2975 },
2976 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05002977 .name = "authenc(hmac(sha512),cbc(des))",
2978 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
2979 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08002980 .type = CRYPTO_ALG_TYPE_AEAD,
2981 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08002982 .setkey = aead_setkey,
2983 .setauthsize = aead_setauthsize,
2984 .encrypt = aead_encrypt,
2985 .decrypt = aead_decrypt,
2986 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05002987 .geniv = "<built-in>",
2988 .ivsize = DES_BLOCK_SIZE,
2989 .maxauthsize = SHA512_DIGEST_SIZE,
2990 },
2991 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2992 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2993 OP_ALG_AAI_HMAC_PRECOMP,
2994 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2995 },
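	/*
	 * RFC4106: AES-GCM as used by IPsec ESP - the 8-byte explicit IV
	 * travels with each request, while the 4-byte salt is supplied as
	 * the tail of the key (see rfc4106_setkey()).
	 */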
	{
		.name = "rfc4106(gcm(aes))",
		.driver_name = "rfc4106-gcm-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
	},
	/* Galois Counter Mode */
	{
		.name = "gcm(aes)",
		.driver_name = "gcm-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = NULL,
			.geniv = "<built-in>",
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	}
};
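
/*
 * Usage sketch (illustrative only, not part of this driver): a kernel
 * consumer reaches these implementations through the generic crypto API
 * by algorithm name; "key"/"keylen" below are placeholders.
 *
 *	struct crypto_aead *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_aead_setkey(tfm, key, keylen);
 *	if (!err)
 *		err = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	...
 *	crypto_free_aead(tfm);
 *
 * The CAAM entries win over same-named software implementations because
 * caam_alg_alloc() registers them with a high cra_priority.
 */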
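/*
 * Registration-time record tying a registered crypto_alg back to the
 * CAAM descriptor header (OPERATION command) selectors it was built from.
 */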
struct caam_crypto_alg {
	struct list_head entry;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};

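/*
 * Per-tfm init: allocate a job ring for this transform and prime its
 * context with the algorithm selectors captured at registration time.
 */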
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}

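/*
 * Per-tfm teardown: undo any DMA mappings still held for the shared
 * descriptors and the key, then release the job ring.
 */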
static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

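/* Module unload: unregister and free every algorithm on alg_list */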
static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

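/*
 * Instantiate a crypto_alg from one driver_algs template: copy the
 * names, fill in the fields common to all CAAM algorithms plus the
 * type-specific ops, and stash the CAAM selectors for caam_cra_init().
 */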
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}

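/*
 * Module load: locate the CAAM controller through the device tree,
 * make sure it probed successfully, then register each template.
 */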
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	void *priv;
	int i = 0, err = 0;

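	/* look for the controller; some device trees use the legacy
	 * "fsl,sec4.0" compatible */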
	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(&driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &alg_list);
	}
	if (!list_empty(&alg_list))
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");