/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                      ---------------
 * | JobDesc #1  |--------------------->|  ShareDesc  |
 * | *(packet 1) |                      |   (PDB)     |
 * ---------------       |------------->|  (hashKey)  |
 *       .               |              | (cipherKey) |
 *       .               |    |-------->| (operation) |
 * ---------------       |    |         ---------------
 * | JobDesc #2  |-------|    |
 * | *(packet 2) |            |
 * ---------------            |
 *       .                    |
 *       .                    |
 * ---------------            |
 * | JobDesc #3  |------------|
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
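
/*
 * For orientation only - a rough sketch of how a per-request job descriptor
 * referencing one of the shared descriptors built below is assembled (the
 * real construction lives in the init_*_job() helpers further down and in
 * desc_constr.h; dst_dma, out_len etc. are placeholders):
 *
 *	init_job_desc_shared(desc, ctx->sh_desc_enc_dma,
 *			     desc_len(ctx->sh_desc_enc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 */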

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, CTR_RFC3686_NONCE_SIZE and
 * max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16
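
/*
 * A quick sanity check of the limit above (values from the standard kernel
 * crypto headers): 32 (AES_MAX_KEY_SIZE) + 4 (CTR_RFC3686_NONCE_SIZE) +
 * 2 * 64 (SHA512_DIGEST_SIZE) = 164 bytes of key material at most.
 */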

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(6 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 19 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
#define DESC_RFC4106_GIVENC_LEN		(DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ)
#define DESC_RFC4543_GIVENC_LEN		(DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_RFC4543_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
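
/*
 * For scale (CAAM_CMD_SZ is one 32-bit command word, i.e. 4 bytes):
 * DESC_MAX_USED_BYTES works out to (3 + 30) * 4 + 164 = 296 bytes, so the
 * per-session shared descriptor buffers below are sized at 74 words each.
 */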

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
{
	append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
			LDST_SRCDST_BYTE_CONTEXT |
			(ivoffset << LDST_OFFSET_SHIFT));
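	/*
	 * Mirror the just-loaded class 1 IV into the class 2 input FIFO,
	 * so the authentication engine covers it as well
	 */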
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
		    (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
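	/* Set both variable sequence lengths to the entire input length */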
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
			       enckeylen);
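		/*
		 * Bounce the nonce through the output FIFO into the
		 * CONTEXT1 IV area (byte offset 16)
		 */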
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/*
	 * NULL encryption; IV is zero
	 * assoclen = (assoclen + cryptlen) - cryptlen
	 */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
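	/*
	 * NULL cipher: no class 1 engine feeds the output FIFO, so the
	 * payload is bounced from the input FIFO to the output FIFO by hand
	 */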
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
	const char *alg_name = crypto_tfm_alg_name(ctfm);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy IV from class 1 context to the output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);
	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 7);
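	/*
	 * (the immediate above is a relative jump offset, counted in 32-bit
	 * command words, over the zero-payload block that follows)
	 */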

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* if assoclen is ZERO, jump to reading the IV - it is the only input data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* jump to ICV writing */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* read IV - it is the only input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
			     FIFOLD_TYPE_LAST1);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);

	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* jump to zero-payload command if cryptlen is zero */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	/* if assoclen is ZERO, skip reading assoc data */
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 4);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* if assoclen is ZERO, jump to ICV reading */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
	u32 *desc;
	u32 geniv;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* Read Salt */
	append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
				4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
	/* Read AES-GCM-ESP IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
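	/*
	 * The 4-byte salt stored past the AES key and the 8-byte explicit IV
	 * together form the 96-bit GCM nonce (RFC 4106)
	 */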

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Read Salt */
	append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
				4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
	/* Read AES-GCM-ESP IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* rfc4106_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
			       (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV to OFIFO */
	write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);

	/* Read Salt and generated IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
		   FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, move_cmd);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by generated IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Store generated IV and encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;
	u32 geniv;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Load AES-GMAC ESP IV into Math1 register */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
		   LDST_CLASS_DECO | tfm->ivsize);

	/* Wait for the DMA transaction to finish */
	append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
		    (1 << JUMP_OFFSET_SHIFT));

	/* Overwrite blank immediate AES-GMAC ESP IV data */
	write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Overwrite blank immediate AAD data */
	write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				    (tfm->ivsize << MOVE_LEN_SHIFT));
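	/*
	 * In GMAC mode (rfc4543) the ESP IV is authenticated as part of the
	 * AAD as well, hence the two separate patch targets above
	 */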
1307
1308 /* cryptlen = seqoutlen - authsize */
1309 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
1310
1311 /* assoclen = (seqinlen - ivsize) - cryptlen */
1312 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
1313
1314 /* Read Salt and AES-GMAC ESP IV */
1315 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1316 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
1317 /* Append Salt */
1318 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
1319 set_move_tgt_here(desc, write_iv_cmd);
1320 /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1321 append_cmd(desc, 0x00000000);
1322 append_cmd(desc, 0x00000000);
1323 /* End of blank commands */
1324
1325 /* Read assoc data */
1326 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1327 FIFOLD_TYPE_AAD);
1328
1329 /* Will read cryptlen bytes */
1330 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
1331
1332 /* Will write cryptlen bytes */
1333 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
1334
1335 /*
1336 * MOVE_LEN opcode is not available in all SEC HW revisions,
1337 * thus need to do some magic, i.e. self-patch the descriptor
1338 * buffer.
1339 */
1340 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1341 (0x6 << MOVE_LEN_SHIFT));
1342 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1343 (0x8 << MOVE_LEN_SHIFT));
1344
1345 /* Authenticate AES-GMAC ESP IV */
1346 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1347 FIFOLD_TYPE_AAD | tfm->ivsize);
1348 set_move_tgt_here(desc, write_aad_cmd);
1349 /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1350 append_cmd(desc, 0x00000000);
1351 append_cmd(desc, 0x00000000);
1352 /* End of blank commands */
1353
1354 /* Read and write cryptlen bytes */
1355 aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
1356
1357 set_move_tgt_here(desc, read_move_cmd);
1358 set_move_tgt_here(desc, write_move_cmd);
1359 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1360 /* Move payload data to OFIFO */
1361 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1362
1363 /* Write ICV */
1364 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1365 LDST_SRCDST_BYTE_CONTEXT);
1366
1367 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1368 desc_bytes(desc),
1369 DMA_TO_DEVICE);
1370 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1371 dev_err(jrdev, "unable to map shared descriptor\n");
1372 return -ENOMEM;
1373 }
1374#ifdef DEBUG
1375 print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
1376 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1377 desc_bytes(desc), 1);
1378#endif
1379
1380 /*
1381 * Job Descriptor and Shared Descriptors
1382 * must all fit into the 64-word Descriptor h/w Buffer
1383 */
1384 keys_fit_inline = false;
1385 if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN +
1386 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1387 keys_fit_inline = true;
1388
1389 desc = ctx->sh_desc_dec;
1390
1391 init_sh_desc(desc, HDR_SHARE_SERIAL);
1392
1393 /* Skip key loading if it is loaded due to sharing */
1394 key_jump_cmd = append_jump(desc, JUMP_JSL |
1395 JUMP_TEST_ALL | JUMP_COND_SHRD);
1396 if (keys_fit_inline)
1397 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1398 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1399 else
1400 append_key(desc, ctx->key_dma, ctx->enckeylen,
1401 CLASS_1 | KEY_DEST_CLASS_REG);
1402 set_jump_tgt_here(desc, key_jump_cmd);
1403
1404 /* Class 1 operation */
1405 append_operation(desc, ctx->class1_alg_type |
1406 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1407
1408 /* Load AES-GMAC ESP IV into Math1 register */
1409 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
1410 LDST_CLASS_DECO | tfm->ivsize);
1411
1412 /* Wait the DMA transaction to finish */
1413 append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
1414 (1 << JUMP_OFFSET_SHIFT));
1415
1416 /* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */
1417 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize);
1418
1419 /* Overwrite blank immediate AES-GMAC ESP IV data */
1420 write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1421 (tfm->ivsize << MOVE_LEN_SHIFT));
1422
1423 /* Overwrite blank immediate AAD data */
1424 write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
1425 (tfm->ivsize << MOVE_LEN_SHIFT));
1426
1427 /* assoclen = (assoclen + cryptlen) - cryptlen */
1428 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1429 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
1430
1431 /*
1432 * MOVE_LEN opcode is not available in all SEC HW revisions,
1433 * thus need to do some magic, i.e. self-patch the descriptor
1434 * buffer.
1435 */
1436 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1437 (0x6 << MOVE_LEN_SHIFT));
1438 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1439 (0x8 << MOVE_LEN_SHIFT));
1440
1441 /* Read Salt and AES-GMAC ESP IV */
1442 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1443 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
1444 /* Append Salt */
1445 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
1446 set_move_tgt_here(desc, write_iv_cmd);
1447 /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1448 append_cmd(desc, 0x00000000);
1449 append_cmd(desc, 0x00000000);
1450 /* End of blank commands */
1451
1452 /* Read assoc data */
1453 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1454 FIFOLD_TYPE_AAD);
1455
1456 /* Will read cryptlen bytes */
1457 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
1458
1459 /* Will write cryptlen bytes */
1460 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
1461
1462 /* Authenticate AES-GMAC ESP IV */
1463 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
1464 FIFOLD_TYPE_AAD | tfm->ivsize);
1465 set_move_tgt_here(desc, write_aad_cmd);
1466 /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
1467 append_cmd(desc, 0x00000000);
1468 append_cmd(desc, 0x00000000);
1469 /* End of blank commands */
1470
1471 /* Store payload data */
1472 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1473
1474 /* In-snoop cryptlen data */
1475 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
1476 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
1477
1478 set_move_tgt_here(desc, read_move_cmd);
1479 set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* rfc4543_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move generated IV to Math1 register */
	append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 |
		    (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Overwrite blank immediate AES-GMAC IV data */
	write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Overwrite blank immediate AAD data */
	write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Copy generated IV to OFIFO */
	append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO |
		    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read Salt and AES-GMAC generated IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC generated IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* No need to reload IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Authenticate AES-GMAC IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_AAD | tfm->ivsize);
	set_move_tgt_here(desc, write_aad_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4543 givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

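/*
 * Generate the MDHA split key (the h/w-computed HMAC ipad/opad precompute)
 * for the authentication algorithm selected by ctx->alg_op and place it at
 * the start of ctx->key.
 */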
static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

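/*
 * Split the authenc key blob into its authentication and encryption parts,
 * generate the split key from the former, and lay out ctx->key as:
 *
 *	[ auth split key, padded to 16 bytes | encryption key ]
 */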
static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret)
		goto badkey;

	/* Append the encryption key to the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

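/*
 * GCM uses the raw AES key directly, so no split key is generated; the
 * key is simply copied into the context and DMA mapped.
 */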
static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

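/*
 * RFC4106 key material is the AES key followed by a 4-byte salt. Only the
 * AES portion is DMA mapped; the salt stays in ctx->key at offset
 * ctx->enckeylen, where the shared descriptors can pick it up.
 */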
static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

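/*
 * RFC4543 (GMAC) uses the same key layout as RFC4106; the 4-byte salt is
 * read from ctx->key + ctx->enckeylen by the append_data() calls in
 * rfc4543_set_sh_desc() above.
 */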
static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

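/*
 * Besides storing the key, this builds the ablkcipher encrypt and decrypt
 * shared descriptors. For AES-CTR the IV is loaded at a 16-byte offset
 * into CONTEXT1; for rfc3686(ctr(aes)) the nonce carried at the end of
 * the key material is loaded ahead of the IV and the block counter is
 * initialized to 1.
 */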
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;
	u32 *nonce;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load IV */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load IV */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

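/*
 * Undo the DMA mappings taken when the request was queued: the source and
 * destination scatterlists (a single bidirectional mapping when they
 * alias), the IV, and the sec4 h/w link table, if one was used.
 */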
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

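/*
 * Job completion callbacks, run by the job ring driver once the h/w has
 * executed a descriptor: report the job status, unmap DMA, free the
 * extended descriptor and complete the crypto request.
 */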
static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen - ctx->authsize, 1);
#endif

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/* If the h/w ICV check failed, complete the request with -EBADMSG */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	bool is_gcm = false;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		if (is_gcm)
			src_dma = edesc->iv_dma;
		else
			src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}

/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	bool is_gcm = false;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		if (is_gcm)
			src_dma = edesc->iv_dma;
		else
			src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  (edesc->assoc_nents +
				   (is_gcm ? 1 + edesc->src_nents : 0));
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
			   out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;
	bool is_gcm = false;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
		dst_nents = sg_count(req->dst,
				     req->cryptlen +
					(encrypt ? authsize : (-authsize)),
				     &dst_chained);
	} else {
		src_nents = sg_count(req->src,
				     req->cryptlen +
					(encrypt ? authsize : 0),
				     &src_chained);
	}

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	/*
	 * Check if data are contiguous.
	 * GCM expected input sequence: IV, AAD, text
	 * All other - expected input sequence: AAD, IV, text
	 */
	if (is_gcm)
		all_contig = (!assoc_nents &&
			      iv_dma + ivsize == sg_dma_address(req->assoc) &&
			      !src_nents && sg_dma_address(req->assoc) +
			      req->assoclen == sg_dma_address(req->src));
	else
		all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
			      req->assoclen == iv_dma && !src_nents &&
			      iv_dma + ivsize == sg_dma_address(req->src));
	if (!all_contig) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		if (!is_gcm) {
			sg_to_sec4_sg(req->assoc,
				      (assoc_nents ? : 1),
				      edesc->sec4_sg +
				      sec4_sg_index, 0);
			sec4_sg_index += assoc_nents ? : 1;
		}

		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;

		if (is_gcm) {
			sg_to_sec4_sg(req->assoc,
				      (assoc_nents ? : 1),
				      edesc->sec4_sg +
				      sec4_sg_index, 0);
			sec4_sg_index += assoc_nents ? : 1;
		}

		sg_to_sec4_sg_last(req->src,
				   (src_nents ? : 1),
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

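/*
 * Note: on the decrypt path req->cryptlen includes the ICV, so the
 * plaintext written out is req->cryptlen - authsize bytes (see
 * init_aead_job()).
 */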
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

Yuan Kang1acebad2011-07-15 11:21:42 +08002709/*
2710 * allocate and map the aead extended descriptor for aead givencrypt
2711 */
2712static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
2713 *greq, int desc_bytes,
2714 u32 *contig_ptr)
2715{
2716 struct aead_request *req = &greq->areq;
2717 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2718 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2719 struct device *jrdev = ctx->jrdev;
2720 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2721 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2722 int assoc_nents, src_nents, dst_nents = 0;
2723 struct aead_edesc *edesc;
2724 dma_addr_t iv_dma = 0;
2725 int sgc;
2726 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
2727 int ivsize = crypto_aead_ivsize(aead);
Yuan Kang643b39b2012-06-22 19:48:49 -05002728 bool assoc_chained = false, src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05002729 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002730 bool is_gcm = false;
Yuan Kang0e479302011-07-15 11:21:41 +08002731
Yuan Kang643b39b2012-06-22 19:48:49 -05002732 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
2733 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
Yuan Kang0e479302011-07-15 11:21:41 +08002734
Yuan Kang1acebad2011-07-15 11:21:42 +08002735 if (unlikely(req->dst != req->src))
Horia Geantabbf9c892013-11-28 15:11:16 +02002736 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
2737 &dst_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002738
Yuan Kang643b39b2012-06-22 19:48:49 -05002739 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
Horia Geanta286233e2013-05-10 15:08:39 +03002740 DMA_TO_DEVICE, assoc_chained);
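	/* in-place requests get a single bidirectional mapping of ->src */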
Yuan Kang1acebad2011-07-15 11:21:42 +08002741 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05002742 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2743 DMA_BIDIRECTIONAL, src_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002744 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05002745 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2746 DMA_TO_DEVICE, src_chained);
2747 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2748 DMA_FROM_DEVICE, dst_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002749 }
2750
Yuan Kang1acebad2011-07-15 11:21:42 +08002751 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002752 if (dma_mapping_error(jrdev, iv_dma)) {
2753 dev_err(jrdev, "unable to map IV\n");
2754 return ERR_PTR(-ENOMEM);
2755 }
2756
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002757 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2758 OP_ALG_ALGSEL_AES) &&
2759 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2760 is_gcm = true;
2761
2762 /*
2763 * Check if data are contiguous.
2764 * GCM expected input sequence: IV, AAD, text
2765	 * All others - expected input sequence: AAD, IV, text
2766 */
2767
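	/*
	 * Any gap between segments, or a multi-entry scatterlist, clears
	 * GIV_SRC_CONTIG and routes the data through a sec4 S/G table
	 * built further down.
	 */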
2768 if (is_gcm) {
2769 if (assoc_nents || iv_dma + ivsize !=
2770 sg_dma_address(req->assoc) || src_nents ||
2771 sg_dma_address(req->assoc) + req->assoclen !=
2772 sg_dma_address(req->src))
2773 contig &= ~GIV_SRC_CONTIG;
2774 } else {
2775 if (assoc_nents ||
2776 sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
2777 src_nents || iv_dma + ivsize != sg_dma_address(req->src))
2778 contig &= ~GIV_SRC_CONTIG;
2779 }
2780
Yuan Kang1acebad2011-07-15 11:21:42 +08002781 if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
2782 contig &= ~GIV_DST_CONTIG;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002783
Yuan Kang1acebad2011-07-15 11:21:42 +08002784 if (!(contig & GIV_SRC_CONTIG)) {
2785 assoc_nents = assoc_nents ? : 1;
2786 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05002787 sec4_sg_len += assoc_nents + 1 + src_nents;
Tudor Ambarus19167bf2014-10-24 18:13:37 +03002788 if (req->src == req->dst &&
2789 (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
Yuan Kang1acebad2011-07-15 11:21:42 +08002790 contig &= ~GIV_DST_CONTIG;
2791 }
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002792
2793 /*
2794 * Add new sg entries for GCM output sequence.
2795 * Expected output sequence: IV, encrypted text.
2796 */
2797 if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
2798 sec4_sg_len += 1 + src_nents;
2799
2800 if (unlikely(req->src != req->dst)) {
2801 dst_nents = dst_nents ? : 1;
2802 sec4_sg_len += 1 + dst_nents;
2803 }
Yuan Kang1acebad2011-07-15 11:21:42 +08002804
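	/*
	 * sec4_sg_len now holds the total number of link-table entries
	 * needed; zero means everything stays contiguous.
	 */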
Yuan Kanga299c832012-06-22 19:48:46 -05002805 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08002806
2807 /* allocate space for base edesc and hw desc commands, link tables */
2808 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05002809 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kang1acebad2011-07-15 11:21:42 +08002810 if (!edesc) {
2811 dev_err(jrdev, "could not allocate extended descriptor\n");
2812 return ERR_PTR(-ENOMEM);
2813 }
2814
2815 edesc->assoc_nents = assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002816 edesc->assoc_chained = assoc_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002817 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002818 edesc->src_chained = src_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002819 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002820 edesc->dst_chained = dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002821 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002822 edesc->sec4_sg_bytes = sec4_sg_bytes;
2823 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2824 desc_bytes;
Yuan Kang1acebad2011-07-15 11:21:42 +08002825 *contig_ptr = contig;
2826
Yuan Kanga299c832012-06-22 19:48:46 -05002827 sec4_sg_index = 0;
Yuan Kang1acebad2011-07-15 11:21:42 +08002828 if (!(contig & GIV_SRC_CONTIG)) {
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002829 if (!is_gcm) {
2830 sg_to_sec4_sg(req->assoc, assoc_nents,
2831 edesc->sec4_sg + sec4_sg_index, 0);
2832 sec4_sg_index += assoc_nents;
2833 }
2834
Yuan Kanga299c832012-06-22 19:48:46 -05002835 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08002836 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05002837 sec4_sg_index += 1;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002838
2839 if (is_gcm) {
2840 sg_to_sec4_sg(req->assoc, assoc_nents,
2841 edesc->sec4_sg + sec4_sg_index, 0);
2842 sec4_sg_index += assoc_nents;
2843 }
2844
Yuan Kanga299c832012-06-22 19:48:46 -05002845 sg_to_sec4_sg_last(req->src, src_nents,
2846 edesc->sec4_sg +
2847 sec4_sg_index, 0);
2848 sec4_sg_index += src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08002849 }
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002850
2851 if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
2852 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2853 iv_dma, ivsize, 0);
2854 sec4_sg_index += 1;
2855 sg_to_sec4_sg_last(req->src, src_nents,
2856 edesc->sec4_sg + sec4_sg_index, 0);
2857 }
2858
Yuan Kang1acebad2011-07-15 11:21:42 +08002859 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
Yuan Kanga299c832012-06-22 19:48:46 -05002860 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08002861 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05002862 sec4_sg_index += 1;
2863 sg_to_sec4_sg_last(req->dst, dst_nents,
2864 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad2011-07-15 11:21:42 +08002865 }
Ruchika Gupta1da2be32014-06-23 19:50:26 +05302866 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2867 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002868 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2869 dev_err(jrdev, "unable to map S/G table\n");
2870 return ERR_PTR(-ENOMEM);
2871 }
Yuan Kang1acebad2011-07-15 11:21:42 +08002872
2873 return edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08002874}
2875
2876static int aead_givencrypt(struct aead_givcrypt_request *areq)
2877{
2878 struct aead_request *req = &areq->areq;
2879 struct aead_edesc *edesc;
2880 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002881 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2882 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002883 u32 contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002884 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002885 int ret = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002886
Kim Phillips8e8ec592011-03-13 16:54:26 +08002887 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08002888 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
2889 CAAM_CMD_SZ, &contig);
2890
Kim Phillips8e8ec592011-03-13 16:54:26 +08002891 if (IS_ERR(edesc))
2892 return PTR_ERR(edesc);
2893
Yuan Kang1acebad2011-07-15 11:21:42 +08002894#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002895 print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002896 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2897 req->cryptlen, 1);
2898#endif
2899
2900	/* Create and submit job descriptor */
2901 init_aead_giv_job(ctx->sh_desc_givenc,
2902 ctx->sh_desc_givenc_dma, edesc, req, contig);
2903#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002904 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002905 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2906 desc_bytes(edesc->hw_desc), 1);
2907#endif
2908
Kim Phillips8e8ec592011-03-13 16:54:26 +08002909 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002910 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2911 if (!ret) {
2912 ret = -EINPROGRESS;
2913 } else {
2914 aead_unmap(jrdev, edesc, req);
2915 kfree(edesc);
2916 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002917
Yuan Kang1acebad2011-07-15 11:21:42 +08002918 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002919}
2920
Horia Geantaae4a8252014-03-14 17:46:52 +02002921static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
2922{
2923 return aead_encrypt(&areq->areq);
2924}
2925
Yuan Kangacdca312011-07-15 11:21:42 +08002926/*
2927 * allocate and map the ablkcipher extended descriptor for ablkcipher
2928 */
2929static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2930 *req, int desc_bytes,
2931 bool *iv_contig_out)
2932{
2933 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2934 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2935 struct device *jrdev = ctx->jrdev;
2936 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2937 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2938 GFP_KERNEL : GFP_ATOMIC;
Yuan Kanga299c832012-06-22 19:48:46 -05002939 int src_nents, dst_nents = 0, sec4_sg_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08002940 struct ablkcipher_edesc *edesc;
2941 dma_addr_t iv_dma = 0;
2942 bool iv_contig = false;
2943 int sgc;
2944 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
Yuan Kang643b39b2012-06-22 19:48:49 -05002945 bool src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05002946 int sec4_sg_index;
Yuan Kangacdca312011-07-15 11:21:42 +08002947
Yuan Kang643b39b2012-06-22 19:48:49 -05002948 src_nents = sg_count(req->src, req->nbytes, &src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002949
Yuan Kang643b39b2012-06-22 19:48:49 -05002950 if (req->dst != req->src)
2951 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002952
2953 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05002954 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2955 DMA_BIDIRECTIONAL, src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002956 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05002957 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2958 DMA_TO_DEVICE, src_chained);
2959 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2960 DMA_FROM_DEVICE, dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002961 }
2962
Horia Geantace572082014-07-11 15:34:49 +03002963 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
2964 if (dma_mapping_error(jrdev, iv_dma)) {
2965 dev_err(jrdev, "unable to map IV\n");
2966 return ERR_PTR(-ENOMEM);
2967 }
2968
Yuan Kangacdca312011-07-15 11:21:42 +08002969 /*
2970 * Check if iv can be contiguous with source and destination.
2971 * If so, include it. If not, create scatterlist.
2972 */
Yuan Kangacdca312011-07-15 11:21:42 +08002973 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
2974 iv_contig = true;
2975 else
2976 src_nents = src_nents ? : 1;
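	/* one entry for a non-contiguous IV, plus the src and dst segments */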
Yuan Kanga299c832012-06-22 19:48:46 -05002977 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2978 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002979
2980 /* allocate space for base edesc and hw desc commands, link tables */
2981 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05002982 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kangacdca312011-07-15 11:21:42 +08002983 if (!edesc) {
2984 dev_err(jrdev, "could not allocate extended descriptor\n");
2985 return ERR_PTR(-ENOMEM);
2986 }
2987
2988 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002989 edesc->src_chained = src_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08002990 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002991 edesc->dst_chained = dst_chained;
Yuan Kanga299c832012-06-22 19:48:46 -05002992 edesc->sec4_sg_bytes = sec4_sg_bytes;
2993 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2994 desc_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08002995
Yuan Kanga299c832012-06-22 19:48:46 -05002996 sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08002997 if (!iv_contig) {
Yuan Kanga299c832012-06-22 19:48:46 -05002998 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
2999 sg_to_sec4_sg_last(req->src, src_nents,
3000 edesc->sec4_sg + 1, 0);
3001 sec4_sg_index += 1 + src_nents;
Yuan Kangacdca312011-07-15 11:21:42 +08003002 }
3003
Yuan Kang643b39b2012-06-22 19:48:49 -05003004 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05003005 sg_to_sec4_sg_last(req->dst, dst_nents,
3006 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kangacdca312011-07-15 11:21:42 +08003007 }
3008
Yuan Kanga299c832012-06-22 19:48:46 -05003009 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
3010 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03003011 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
3012 dev_err(jrdev, "unable to map S/G table\n");
3013 return ERR_PTR(-ENOMEM);
3014 }
3015
Yuan Kangacdca312011-07-15 11:21:42 +08003016 edesc->iv_dma = iv_dma;
3017
3018#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03003019 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
Yuan Kanga299c832012-06-22 19:48:46 -05003020 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
3021 sec4_sg_bytes, 1);
Yuan Kangacdca312011-07-15 11:21:42 +08003022#endif
3023
3024 *iv_contig_out = iv_contig;
3025 return edesc;
3026}
3027
3028static int ablkcipher_encrypt(struct ablkcipher_request *req)
3029{
3030 struct ablkcipher_edesc *edesc;
3031 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3032 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3033 struct device *jrdev = ctx->jrdev;
3034 bool iv_contig;
3035 u32 *desc;
3036 int ret = 0;
3037
3038 /* allocate extended descriptor */
3039 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
3040 CAAM_CMD_SZ, &iv_contig);
3041 if (IS_ERR(edesc))
3042 return PTR_ERR(edesc);
3043
3044	/* Create and submit job descriptor */
3045 init_ablkcipher_job(ctx->sh_desc_enc,
3046 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
3047#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03003048 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08003049 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3050 desc_bytes(edesc->hw_desc), 1);
3051#endif
3052 desc = edesc->hw_desc;
3053 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
3054
3055 if (!ret) {
3056 ret = -EINPROGRESS;
3057 } else {
3058 ablkcipher_unmap(jrdev, edesc, req);
3059 kfree(edesc);
3060 }
3061
3062 return ret;
3063}
3064
3065static int ablkcipher_decrypt(struct ablkcipher_request *req)
3066{
3067 struct ablkcipher_edesc *edesc;
3068 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3069 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3070 struct device *jrdev = ctx->jrdev;
3071 bool iv_contig;
3072 u32 *desc;
3073 int ret = 0;
3074
3075 /* allocate extended descriptor */
3076 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
3077 CAAM_CMD_SZ, &iv_contig);
3078 if (IS_ERR(edesc))
3079 return PTR_ERR(edesc);
3080
3081	/* Create and submit job descriptor */
3082 init_ablkcipher_job(ctx->sh_desc_dec,
3083 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
3084 desc = edesc->hw_desc;
3085#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03003086 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08003087 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3088 desc_bytes(edesc->hw_desc), 1);
3089#endif
3090
3091 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
3092 if (!ret) {
3093 ret = -EINPROGRESS;
3094 } else {
3095 ablkcipher_unmap(jrdev, edesc, req);
3096 kfree(edesc);
3097 }
3098
3099 return ret;
3100}
3101
Yuan Kang885e9e22011-07-15 11:21:41 +08003102#define template_aead template_u.aead
Yuan Kangacdca312011-07-15 11:21:42 +08003103#define template_ablkcipher template_u.ablkcipher
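/*
 * Algorithm template: name, block size, per-type crypto_alg callbacks and
 * the CAAM OP_ALG header values used when building its shared descriptors.
 */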
Kim Phillips8e8ec592011-03-13 16:54:26 +08003104struct caam_alg_template {
3105 char name[CRYPTO_MAX_ALG_NAME];
3106 char driver_name[CRYPTO_MAX_ALG_NAME];
3107 unsigned int blocksize;
Yuan Kang885e9e22011-07-15 11:21:41 +08003108 u32 type;
3109 union {
3110 struct ablkcipher_alg ablkcipher;
3111 struct aead_alg aead;
3112 struct blkcipher_alg blkcipher;
3113 struct cipher_alg cipher;
3114 struct compress_alg compress;
3115 struct rng_alg rng;
3116 } template_u;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003117 u32 class1_alg_type;
3118 u32 class2_alg_type;
3119 u32 alg_op;
3120};
3121
3122static struct caam_alg_template driver_algs[] = {
Horia Geanta246bbed2013-03-20 16:31:58 +02003123 /* single-pass ipsec_esp descriptor */
Kim Phillips8e8ec592011-03-13 16:54:26 +08003124 {
Horia Geantaae4a8252014-03-14 17:46:52 +02003125 .name = "authenc(hmac(md5),ecb(cipher_null))",
3126 .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
3127 .blocksize = NULL_BLOCK_SIZE,
3128 .type = CRYPTO_ALG_TYPE_AEAD,
3129 .template_aead = {
3130 .setkey = aead_setkey,
3131 .setauthsize = aead_setauthsize,
3132 .encrypt = aead_encrypt,
3133 .decrypt = aead_decrypt,
3134 .givencrypt = aead_null_givencrypt,
3135 .geniv = "<built-in>",
3136 .ivsize = NULL_IV_SIZE,
3137 .maxauthsize = MD5_DIGEST_SIZE,
3138 },
3139 .class1_alg_type = 0,
3140 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3141 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3142 },
3143 {
3144 .name = "authenc(hmac(sha1),ecb(cipher_null))",
3145 .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
3146 .blocksize = NULL_BLOCK_SIZE,
3147 .type = CRYPTO_ALG_TYPE_AEAD,
3148 .template_aead = {
3149 .setkey = aead_setkey,
3150 .setauthsize = aead_setauthsize,
3151 .encrypt = aead_encrypt,
3152 .decrypt = aead_decrypt,
3153 .givencrypt = aead_null_givencrypt,
3154 .geniv = "<built-in>",
3155 .ivsize = NULL_IV_SIZE,
3156 .maxauthsize = SHA1_DIGEST_SIZE,
3157 },
3158 .class1_alg_type = 0,
3159 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3160 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3161 },
3162 {
3163 .name = "authenc(hmac(sha224),ecb(cipher_null))",
3164 .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
3165 .blocksize = NULL_BLOCK_SIZE,
3166 .type = CRYPTO_ALG_TYPE_AEAD,
3167 .template_aead = {
3168 .setkey = aead_setkey,
3169 .setauthsize = aead_setauthsize,
3170 .encrypt = aead_encrypt,
3171 .decrypt = aead_decrypt,
3172 .givencrypt = aead_null_givencrypt,
3173 .geniv = "<built-in>",
3174 .ivsize = NULL_IV_SIZE,
3175 .maxauthsize = SHA224_DIGEST_SIZE,
3176 },
3177 .class1_alg_type = 0,
3178 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3179 OP_ALG_AAI_HMAC_PRECOMP,
3180 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3181 },
3182 {
3183 .name = "authenc(hmac(sha256),ecb(cipher_null))",
3184 .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
3185 .blocksize = NULL_BLOCK_SIZE,
3186 .type = CRYPTO_ALG_TYPE_AEAD,
3187 .template_aead = {
3188 .setkey = aead_setkey,
3189 .setauthsize = aead_setauthsize,
3190 .encrypt = aead_encrypt,
3191 .decrypt = aead_decrypt,
3192 .givencrypt = aead_null_givencrypt,
3193 .geniv = "<built-in>",
3194 .ivsize = NULL_IV_SIZE,
3195 .maxauthsize = SHA256_DIGEST_SIZE,
3196 },
3197 .class1_alg_type = 0,
3198 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3199 OP_ALG_AAI_HMAC_PRECOMP,
3200 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3201 },
3202 {
3203 .name = "authenc(hmac(sha384),ecb(cipher_null))",
3204 .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
3205 .blocksize = NULL_BLOCK_SIZE,
3206 .type = CRYPTO_ALG_TYPE_AEAD,
3207 .template_aead = {
3208 .setkey = aead_setkey,
3209 .setauthsize = aead_setauthsize,
3210 .encrypt = aead_encrypt,
3211 .decrypt = aead_decrypt,
3212 .givencrypt = aead_null_givencrypt,
3213 .geniv = "<built-in>",
3214 .ivsize = NULL_IV_SIZE,
3215 .maxauthsize = SHA384_DIGEST_SIZE,
3216 },
3217 .class1_alg_type = 0,
3218 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3219 OP_ALG_AAI_HMAC_PRECOMP,
3220 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3221 },
3222 {
3223 .name = "authenc(hmac(sha512),ecb(cipher_null))",
3224 .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
3225 .blocksize = NULL_BLOCK_SIZE,
3226 .type = CRYPTO_ALG_TYPE_AEAD,
3227 .template_aead = {
3228 .setkey = aead_setkey,
3229 .setauthsize = aead_setauthsize,
3230 .encrypt = aead_encrypt,
3231 .decrypt = aead_decrypt,
3232 .givencrypt = aead_null_givencrypt,
3233 .geniv = "<built-in>",
3234 .ivsize = NULL_IV_SIZE,
3235 .maxauthsize = SHA512_DIGEST_SIZE,
3236 },
3237 .class1_alg_type = 0,
3238 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3239 OP_ALG_AAI_HMAC_PRECOMP,
3240 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3241 },
3242 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003243 .name = "authenc(hmac(md5),cbc(aes))",
3244 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
3245 .blocksize = AES_BLOCK_SIZE,
3246 .type = CRYPTO_ALG_TYPE_AEAD,
3247 .template_aead = {
3248 .setkey = aead_setkey,
3249 .setauthsize = aead_setauthsize,
3250 .encrypt = aead_encrypt,
3251 .decrypt = aead_decrypt,
3252 .givencrypt = aead_givencrypt,
3253 .geniv = "<built-in>",
3254 .ivsize = AES_BLOCK_SIZE,
3255 .maxauthsize = MD5_DIGEST_SIZE,
3256 },
3257 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3258 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3259 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3260 },
3261 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003262 .name = "authenc(hmac(sha1),cbc(aes))",
3263 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
3264 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003265 .type = CRYPTO_ALG_TYPE_AEAD,
3266 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003267 .setkey = aead_setkey,
3268 .setauthsize = aead_setauthsize,
3269 .encrypt = aead_encrypt,
3270 .decrypt = aead_decrypt,
3271 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003272 .geniv = "<built-in>",
3273 .ivsize = AES_BLOCK_SIZE,
3274 .maxauthsize = SHA1_DIGEST_SIZE,
3275 },
3276 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3277 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3278 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3279 },
3280 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003281 .name = "authenc(hmac(sha224),cbc(aes))",
3282 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
3283 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303284 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003285 .template_aead = {
3286 .setkey = aead_setkey,
3287 .setauthsize = aead_setauthsize,
3288 .encrypt = aead_encrypt,
3289 .decrypt = aead_decrypt,
3290 .givencrypt = aead_givencrypt,
3291 .geniv = "<built-in>",
3292 .ivsize = AES_BLOCK_SIZE,
3293 .maxauthsize = SHA224_DIGEST_SIZE,
3294 },
3295 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3296 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3297 OP_ALG_AAI_HMAC_PRECOMP,
3298 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3299 },
3300 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003301 .name = "authenc(hmac(sha256),cbc(aes))",
3302 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
3303 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003304 .type = CRYPTO_ALG_TYPE_AEAD,
3305 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003306 .setkey = aead_setkey,
3307 .setauthsize = aead_setauthsize,
3308 .encrypt = aead_encrypt,
3309 .decrypt = aead_decrypt,
3310 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003311 .geniv = "<built-in>",
3312 .ivsize = AES_BLOCK_SIZE,
3313 .maxauthsize = SHA256_DIGEST_SIZE,
3314 },
3315 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3316 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3317 OP_ALG_AAI_HMAC_PRECOMP,
3318 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3319 },
3320 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003321 .name = "authenc(hmac(sha384),cbc(aes))",
3322 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
3323 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303324 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003325 .template_aead = {
3326 .setkey = aead_setkey,
3327 .setauthsize = aead_setauthsize,
3328 .encrypt = aead_encrypt,
3329 .decrypt = aead_decrypt,
3330 .givencrypt = aead_givencrypt,
3331 .geniv = "<built-in>",
3332 .ivsize = AES_BLOCK_SIZE,
3333 .maxauthsize = SHA384_DIGEST_SIZE,
3334 },
3335 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3336 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3337 OP_ALG_AAI_HMAC_PRECOMP,
3338 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3339 },
3340
3341 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003342 .name = "authenc(hmac(sha512),cbc(aes))",
3343 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
3344 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003345 .type = CRYPTO_ALG_TYPE_AEAD,
3346 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003347 .setkey = aead_setkey,
3348 .setauthsize = aead_setauthsize,
3349 .encrypt = aead_encrypt,
3350 .decrypt = aead_decrypt,
3351 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003352 .geniv = "<built-in>",
3353 .ivsize = AES_BLOCK_SIZE,
3354 .maxauthsize = SHA512_DIGEST_SIZE,
3355 },
3356 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3357 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3358 OP_ALG_AAI_HMAC_PRECOMP,
3359 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3360 },
3361 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003362 .name = "authenc(hmac(md5),cbc(des3_ede))",
3363 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
3364 .blocksize = DES3_EDE_BLOCK_SIZE,
3365 .type = CRYPTO_ALG_TYPE_AEAD,
3366 .template_aead = {
3367 .setkey = aead_setkey,
3368 .setauthsize = aead_setauthsize,
3369 .encrypt = aead_encrypt,
3370 .decrypt = aead_decrypt,
3371 .givencrypt = aead_givencrypt,
3372 .geniv = "<built-in>",
3373 .ivsize = DES3_EDE_BLOCK_SIZE,
3374 .maxauthsize = MD5_DIGEST_SIZE,
3375 },
3376 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3377 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3378 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3379 },
3380 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003381 .name = "authenc(hmac(sha1),cbc(des3_ede))",
3382 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
3383 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003384 .type = CRYPTO_ALG_TYPE_AEAD,
3385 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003386 .setkey = aead_setkey,
3387 .setauthsize = aead_setauthsize,
3388 .encrypt = aead_encrypt,
3389 .decrypt = aead_decrypt,
3390 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003391 .geniv = "<built-in>",
3392 .ivsize = DES3_EDE_BLOCK_SIZE,
3393 .maxauthsize = SHA1_DIGEST_SIZE,
3394 },
3395 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3396 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3397 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3398 },
3399 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003400 .name = "authenc(hmac(sha224),cbc(des3_ede))",
3401 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
3402 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303403 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003404 .template_aead = {
3405 .setkey = aead_setkey,
3406 .setauthsize = aead_setauthsize,
3407 .encrypt = aead_encrypt,
3408 .decrypt = aead_decrypt,
3409 .givencrypt = aead_givencrypt,
3410 .geniv = "<built-in>",
3411 .ivsize = DES3_EDE_BLOCK_SIZE,
3412 .maxauthsize = SHA224_DIGEST_SIZE,
3413 },
3414 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3415 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3416 OP_ALG_AAI_HMAC_PRECOMP,
3417 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3418 },
3419 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003420 .name = "authenc(hmac(sha256),cbc(des3_ede))",
3421 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
3422 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003423 .type = CRYPTO_ALG_TYPE_AEAD,
3424 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003425 .setkey = aead_setkey,
3426 .setauthsize = aead_setauthsize,
3427 .encrypt = aead_encrypt,
3428 .decrypt = aead_decrypt,
3429 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003430 .geniv = "<built-in>",
3431 .ivsize = DES3_EDE_BLOCK_SIZE,
3432 .maxauthsize = SHA256_DIGEST_SIZE,
3433 },
3434 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3435 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3436 OP_ALG_AAI_HMAC_PRECOMP,
3437 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3438 },
3439 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003440 .name = "authenc(hmac(sha384),cbc(des3_ede))",
3441 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
3442 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303443 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003444 .template_aead = {
3445 .setkey = aead_setkey,
3446 .setauthsize = aead_setauthsize,
3447 .encrypt = aead_encrypt,
3448 .decrypt = aead_decrypt,
3449 .givencrypt = aead_givencrypt,
3450 .geniv = "<built-in>",
3451 .ivsize = DES3_EDE_BLOCK_SIZE,
3452 .maxauthsize = SHA384_DIGEST_SIZE,
3453 },
3454 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3455 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3456 OP_ALG_AAI_HMAC_PRECOMP,
3457 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3458 },
3459 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003460 .name = "authenc(hmac(sha512),cbc(des3_ede))",
3461 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
3462 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003463 .type = CRYPTO_ALG_TYPE_AEAD,
3464 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003465 .setkey = aead_setkey,
3466 .setauthsize = aead_setauthsize,
3467 .encrypt = aead_encrypt,
3468 .decrypt = aead_decrypt,
3469 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003470 .geniv = "<built-in>",
3471 .ivsize = DES3_EDE_BLOCK_SIZE,
3472 .maxauthsize = SHA512_DIGEST_SIZE,
3473 },
3474 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3475 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3476 OP_ALG_AAI_HMAC_PRECOMP,
3477 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3478 },
3479 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003480 .name = "authenc(hmac(md5),cbc(des))",
3481 .driver_name = "authenc-hmac-md5-cbc-des-caam",
3482 .blocksize = DES_BLOCK_SIZE,
3483 .type = CRYPTO_ALG_TYPE_AEAD,
3484 .template_aead = {
3485 .setkey = aead_setkey,
3486 .setauthsize = aead_setauthsize,
3487 .encrypt = aead_encrypt,
3488 .decrypt = aead_decrypt,
3489 .givencrypt = aead_givencrypt,
3490 .geniv = "<built-in>",
3491 .ivsize = DES_BLOCK_SIZE,
3492 .maxauthsize = MD5_DIGEST_SIZE,
3493 },
3494 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3495 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3496 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3497 },
3498 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003499 .name = "authenc(hmac(sha1),cbc(des))",
3500 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
3501 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003502 .type = CRYPTO_ALG_TYPE_AEAD,
3503 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003504 .setkey = aead_setkey,
3505 .setauthsize = aead_setauthsize,
3506 .encrypt = aead_encrypt,
3507 .decrypt = aead_decrypt,
3508 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003509 .geniv = "<built-in>",
3510 .ivsize = DES_BLOCK_SIZE,
3511 .maxauthsize = SHA1_DIGEST_SIZE,
3512 },
3513 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3514 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3515 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3516 },
3517 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003518 .name = "authenc(hmac(sha224),cbc(des))",
3519 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
3520 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303521 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003522 .template_aead = {
3523 .setkey = aead_setkey,
3524 .setauthsize = aead_setauthsize,
3525 .encrypt = aead_encrypt,
3526 .decrypt = aead_decrypt,
3527 .givencrypt = aead_givencrypt,
3528 .geniv = "<built-in>",
3529 .ivsize = DES_BLOCK_SIZE,
3530 .maxauthsize = SHA224_DIGEST_SIZE,
3531 },
3532 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3533 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3534 OP_ALG_AAI_HMAC_PRECOMP,
3535 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3536 },
3537 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003538 .name = "authenc(hmac(sha256),cbc(des))",
3539 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
3540 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003541 .type = CRYPTO_ALG_TYPE_AEAD,
3542 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003543 .setkey = aead_setkey,
3544 .setauthsize = aead_setauthsize,
3545 .encrypt = aead_encrypt,
3546 .decrypt = aead_decrypt,
3547 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003548 .geniv = "<built-in>",
3549 .ivsize = DES_BLOCK_SIZE,
3550 .maxauthsize = SHA256_DIGEST_SIZE,
3551 },
3552 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3553 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3554 OP_ALG_AAI_HMAC_PRECOMP,
3555 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3556 },
Kim Phillips4427b1b2011-05-14 22:08:17 -05003557 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003558 .name = "authenc(hmac(sha384),cbc(des))",
3559 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
3560 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303561 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003562 .template_aead = {
3563 .setkey = aead_setkey,
3564 .setauthsize = aead_setauthsize,
3565 .encrypt = aead_encrypt,
3566 .decrypt = aead_decrypt,
3567 .givencrypt = aead_givencrypt,
3568 .geniv = "<built-in>",
3569 .ivsize = DES_BLOCK_SIZE,
3570 .maxauthsize = SHA384_DIGEST_SIZE,
3571 },
3572 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3573 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3574 OP_ALG_AAI_HMAC_PRECOMP,
3575 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3576 },
3577 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003578 .name = "authenc(hmac(sha512),cbc(des))",
3579 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
3580 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003581 .type = CRYPTO_ALG_TYPE_AEAD,
3582 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003583 .setkey = aead_setkey,
3584 .setauthsize = aead_setauthsize,
3585 .encrypt = aead_encrypt,
3586 .decrypt = aead_decrypt,
3587 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003588 .geniv = "<built-in>",
3589 .ivsize = DES_BLOCK_SIZE,
3590 .maxauthsize = SHA512_DIGEST_SIZE,
3591 },
3592 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3593 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3594 OP_ALG_AAI_HMAC_PRECOMP,
3595 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3596 },
Tudor Ambarusbac68f22014-10-23 16:14:03 +03003597 {
Catalin Vasiledaebc462014-10-31 12:45:37 +02003598 .name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
3599 .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
3600 .blocksize = 1,
3601 .type = CRYPTO_ALG_TYPE_AEAD,
3602 .template_aead = {
3603 .setkey = aead_setkey,
3604 .setauthsize = aead_setauthsize,
3605 .encrypt = aead_encrypt,
3606 .decrypt = aead_decrypt,
3607 .givencrypt = aead_givencrypt,
3608 .geniv = "<built-in>",
3609 .ivsize = CTR_RFC3686_IV_SIZE,
3610 .maxauthsize = MD5_DIGEST_SIZE,
3611 },
3612 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3613 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3614 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3615 },
3616 {
3617 .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
3618 .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
3619 .blocksize = 1,
3620 .type = CRYPTO_ALG_TYPE_AEAD,
3621 .template_aead = {
3622 .setkey = aead_setkey,
3623 .setauthsize = aead_setauthsize,
3624 .encrypt = aead_encrypt,
3625 .decrypt = aead_decrypt,
3626 .givencrypt = aead_givencrypt,
3627 .geniv = "<built-in>",
3628 .ivsize = CTR_RFC3686_IV_SIZE,
3629 .maxauthsize = SHA1_DIGEST_SIZE,
3630 },
3631 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3632 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3633 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3634 },
3635 {
3636 .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
3637 .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
3638 .blocksize = 1,
3639 .type = CRYPTO_ALG_TYPE_AEAD,
3640 .template_aead = {
3641 .setkey = aead_setkey,
3642 .setauthsize = aead_setauthsize,
3643 .encrypt = aead_encrypt,
3644 .decrypt = aead_decrypt,
3645 .givencrypt = aead_givencrypt,
3646 .geniv = "<built-in>",
3647 .ivsize = CTR_RFC3686_IV_SIZE,
3648 .maxauthsize = SHA224_DIGEST_SIZE,
3649 },
3650 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3651 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3652 OP_ALG_AAI_HMAC_PRECOMP,
3653 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3654 },
3655 {
3656 .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
3657 .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
3658 .blocksize = 1,
3659 .type = CRYPTO_ALG_TYPE_AEAD,
3660 .template_aead = {
3661 .setkey = aead_setkey,
3662 .setauthsize = aead_setauthsize,
3663 .encrypt = aead_encrypt,
3664 .decrypt = aead_decrypt,
3665 .givencrypt = aead_givencrypt,
3666 .geniv = "<built-in>",
3667 .ivsize = CTR_RFC3686_IV_SIZE,
3668 .maxauthsize = SHA256_DIGEST_SIZE,
3669 },
3670 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3671 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3672 OP_ALG_AAI_HMAC_PRECOMP,
3673 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3674 },
3675 {
3676 .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
3677 .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
3678 .blocksize = 1,
3679 .type = CRYPTO_ALG_TYPE_AEAD,
3680 .template_aead = {
3681 .setkey = aead_setkey,
3682 .setauthsize = aead_setauthsize,
3683 .encrypt = aead_encrypt,
3684 .decrypt = aead_decrypt,
3685 .givencrypt = aead_givencrypt,
3686 .geniv = "<built-in>",
3687 .ivsize = CTR_RFC3686_IV_SIZE,
3688 .maxauthsize = SHA384_DIGEST_SIZE,
3689 },
3690 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3691 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3692 OP_ALG_AAI_HMAC_PRECOMP,
3693 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3694 },
3695 {
3696 .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
3697 .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
3698 .blocksize = 1,
3699 .type = CRYPTO_ALG_TYPE_AEAD,
3700 .template_aead = {
3701 .setkey = aead_setkey,
3702 .setauthsize = aead_setauthsize,
3703 .encrypt = aead_encrypt,
3704 .decrypt = aead_decrypt,
3705 .givencrypt = aead_givencrypt,
3706 .geniv = "<built-in>",
3707 .ivsize = CTR_RFC3686_IV_SIZE,
3708 .maxauthsize = SHA512_DIGEST_SIZE,
3709 },
3710 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3711 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3712 OP_ALG_AAI_HMAC_PRECOMP,
3713 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3714 },
3715 {
Tudor Ambarusbac68f22014-10-23 16:14:03 +03003716 .name = "rfc4106(gcm(aes))",
3717 .driver_name = "rfc4106-gcm-aes-caam",
3718 .blocksize = 1,
3719 .type = CRYPTO_ALG_TYPE_AEAD,
3720 .template_aead = {
3721 .setkey = rfc4106_setkey,
3722 .setauthsize = rfc4106_setauthsize,
3723 .encrypt = aead_encrypt,
3724 .decrypt = aead_decrypt,
3725 .givencrypt = aead_givencrypt,
3726 .geniv = "<built-in>",
3727 .ivsize = 8,
3728 .maxauthsize = AES_BLOCK_SIZE,
3729 },
3730 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3731 },
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02003732 {
3733 .name = "rfc4543(gcm(aes))",
3734 .driver_name = "rfc4543-gcm-aes-caam",
3735 .blocksize = 1,
3736 .type = CRYPTO_ALG_TYPE_AEAD,
3737 .template_aead = {
3738 .setkey = rfc4543_setkey,
3739 .setauthsize = rfc4543_setauthsize,
3740 .encrypt = aead_encrypt,
3741 .decrypt = aead_decrypt,
3742 .givencrypt = aead_givencrypt,
3743 .geniv = "<built-in>",
3744 .ivsize = 8,
3745 .maxauthsize = AES_BLOCK_SIZE,
3746 },
3747 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3748 },
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03003749 /* Galois Counter Mode */
3750 {
3751 .name = "gcm(aes)",
3752 .driver_name = "gcm-aes-caam",
3753 .blocksize = 1,
3754 .type = CRYPTO_ALG_TYPE_AEAD,
3755 .template_aead = {
3756 .setkey = gcm_setkey,
3757 .setauthsize = gcm_setauthsize,
3758 .encrypt = aead_encrypt,
3759 .decrypt = aead_decrypt,
3760 .givencrypt = NULL,
3761 .geniv = "<built-in>",
3762 .ivsize = 12,
3763 .maxauthsize = AES_BLOCK_SIZE,
3764 },
3765 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3766 },
Yuan Kangacdca312011-07-15 11:21:42 +08003767 /* ablkcipher descriptor */
3768 {
3769 .name = "cbc(aes)",
3770 .driver_name = "cbc-aes-caam",
3771 .blocksize = AES_BLOCK_SIZE,
3772 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3773 .template_ablkcipher = {
3774 .setkey = ablkcipher_setkey,
3775 .encrypt = ablkcipher_encrypt,
3776 .decrypt = ablkcipher_decrypt,
3777 .geniv = "eseqiv",
3778 .min_keysize = AES_MIN_KEY_SIZE,
3779 .max_keysize = AES_MAX_KEY_SIZE,
3780 .ivsize = AES_BLOCK_SIZE,
3781 },
3782 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3783 },
3784 {
3785 .name = "cbc(des3_ede)",
3786 .driver_name = "cbc-3des-caam",
3787 .blocksize = DES3_EDE_BLOCK_SIZE,
3788 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3789 .template_ablkcipher = {
3790 .setkey = ablkcipher_setkey,
3791 .encrypt = ablkcipher_encrypt,
3792 .decrypt = ablkcipher_decrypt,
3793 .geniv = "eseqiv",
3794 .min_keysize = DES3_EDE_KEY_SIZE,
3795 .max_keysize = DES3_EDE_KEY_SIZE,
3796 .ivsize = DES3_EDE_BLOCK_SIZE,
3797 },
3798 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3799 },
3800 {
3801 .name = "cbc(des)",
3802 .driver_name = "cbc-des-caam",
3803 .blocksize = DES_BLOCK_SIZE,
3804 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3805 .template_ablkcipher = {
3806 .setkey = ablkcipher_setkey,
3807 .encrypt = ablkcipher_encrypt,
3808 .decrypt = ablkcipher_decrypt,
3809 .geniv = "eseqiv",
3810 .min_keysize = DES_KEY_SIZE,
3811 .max_keysize = DES_KEY_SIZE,
3812 .ivsize = DES_BLOCK_SIZE,
3813 },
3814 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02003815 },
3816 {
3817 .name = "ctr(aes)",
3818 .driver_name = "ctr-aes-caam",
3819 .blocksize = 1,
3820 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3821 .template_ablkcipher = {
3822 .setkey = ablkcipher_setkey,
3823 .encrypt = ablkcipher_encrypt,
3824 .decrypt = ablkcipher_decrypt,
3825 .geniv = "chainiv",
3826 .min_keysize = AES_MIN_KEY_SIZE,
3827 .max_keysize = AES_MAX_KEY_SIZE,
3828 .ivsize = AES_BLOCK_SIZE,
3829 },
3830 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02003831 },
3832 {
3833 .name = "rfc3686(ctr(aes))",
3834 .driver_name = "rfc3686-ctr-aes-caam",
3835 .blocksize = 1,
3836 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3837 .template_ablkcipher = {
3838 .setkey = ablkcipher_setkey,
3839 .encrypt = ablkcipher_encrypt,
3840 .decrypt = ablkcipher_decrypt,
3841 .geniv = "seqiv",
3842 .min_keysize = AES_MIN_KEY_SIZE +
3843 CTR_RFC3686_NONCE_SIZE,
3844 .max_keysize = AES_MAX_KEY_SIZE +
3845 CTR_RFC3686_NONCE_SIZE,
3846 .ivsize = CTR_RFC3686_IV_SIZE,
3847 },
3848 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
Yuan Kangacdca312011-07-15 11:21:42 +08003849 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003850};
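
/*
 * Rough consumer-side sketch (not part of this driver): once registered,
 * the templates above are reached through the generic crypto API.  For the
 * first authenc entry, roughly (identifiers are placeholders, flow per the
 * AEAD interface of this kernel generation):
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, 0, done_cb, cb_ctx);
 *	aead_request_set_assoc(req, assoc_sg, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);	// usually returns -EINPROGRESS;
 *					// completion arrives via done_cb
 */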
3851
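/*
 * Run-time wrapper around one registered crypto_alg; kept on alg_list so
 * module exit can unregister and free it.
 */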
3852struct caam_crypto_alg {
3853 struct list_head entry;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003854 int class1_alg_type;
3855 int class2_alg_type;
3856 int alg_op;
3857 struct crypto_alg crypto_alg;
3858};
3859
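/*
 * tfm construction: grab a job ring for this transform and cache the
 * OP_ALG header values from its caam_crypto_alg.
 */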
3860static int caam_cra_init(struct crypto_tfm *tfm)
3861{
3862 struct crypto_alg *alg = tfm->__crt_alg;
3863 struct caam_crypto_alg *caam_alg =
3864 container_of(alg, struct caam_crypto_alg, crypto_alg);
3865 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003866
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303867 ctx->jrdev = caam_jr_alloc();
3868 if (IS_ERR(ctx->jrdev)) {
3869 pr_err("Job Ring Device allocation for transform failed\n");
3870 return PTR_ERR(ctx->jrdev);
3871 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003872
3873 /* copy descriptor header template value */
3874 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
3875 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
3876 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
3877
3878 return 0;
3879}
3880
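/*
 * Undo caam_cra_init() and any setkey work: unmap the shared descriptors
 * and the key material if they were mapped, then release the job ring.
 */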
3881static void caam_cra_exit(struct crypto_tfm *tfm)
3882{
3883 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
3884
Yuan Kang1acebad2011-07-15 11:21:42 +08003885 if (ctx->sh_desc_enc_dma &&
3886 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
3887 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
3888 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
3889 if (ctx->sh_desc_dec_dma &&
3890 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
3891 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
3892 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
3893 if (ctx->sh_desc_givenc_dma &&
3894 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
3895 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
3896 desc_bytes(ctx->sh_desc_givenc),
Kim Phillips4427b1b2011-05-14 22:08:17 -05003897 DMA_TO_DEVICE);
Horia Geantaec31eed2014-03-14 17:48:30 +02003898 if (ctx->key_dma &&
3899 !dma_mapping_error(ctx->jrdev, ctx->key_dma))
3900 dma_unmap_single(ctx->jrdev, ctx->key_dma,
3901 ctx->enckeylen + ctx->split_key_pad_len,
3902 DMA_TO_DEVICE);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303903
3904 caam_jr_free(ctx->jrdev);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003905}
3906
3907static void __exit caam_algapi_exit(void)
3908{
3909
Kim Phillips8e8ec592011-03-13 16:54:26 +08003910 struct caam_crypto_alg *t_alg, *n;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003911
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303912 if (!alg_list.next)
Kim Phillips8e8ec592011-03-13 16:54:26 +08003913 return;
3914
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303915 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003916 crypto_unregister_alg(&t_alg->crypto_alg);
3917 list_del(&t_alg->entry);
3918 kfree(t_alg);
3919 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003920}
3921
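/* build a caam_crypto_alg (crypto_alg plus CAAM header values) from a template */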
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303922static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
Kim Phillips8e8ec592011-03-13 16:54:26 +08003923 *template)
3924{
3925 struct caam_crypto_alg *t_alg;
3926 struct crypto_alg *alg;
3927
3928 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
3929 if (!t_alg) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303930 pr_err("failed to allocate t_alg\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08003931 return ERR_PTR(-ENOMEM);
3932 }
3933
3934 alg = &t_alg->crypto_alg;
3935
3936 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
3937 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
3938 template->driver_name);
3939 alg->cra_module = THIS_MODULE;
3940 alg->cra_init = caam_cra_init;
3941 alg->cra_exit = caam_cra_exit;
3942 alg->cra_priority = CAAM_CRA_PRIORITY;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003943 alg->cra_blocksize = template->blocksize;
3944 alg->cra_alignmask = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003945 alg->cra_ctxsize = sizeof(struct caam_ctx);
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01003946 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
3947 template->type;
Yuan Kang885e9e22011-07-15 11:21:41 +08003948 switch (template->type) {
Yuan Kangacdca312011-07-15 11:21:42 +08003949 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3950 alg->cra_type = &crypto_ablkcipher_type;
3951 alg->cra_ablkcipher = template->template_ablkcipher;
3952 break;
Yuan Kang885e9e22011-07-15 11:21:41 +08003953 case CRYPTO_ALG_TYPE_AEAD:
3954 alg->cra_type = &crypto_aead_type;
3955 alg->cra_aead = template->template_aead;
3956 break;
3957 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003958
3959 t_alg->class1_alg_type = template->class1_alg_type;
3960 t_alg->class2_alg_type = template->class2_alg_type;
3961 t_alg->alg_op = template->alg_op;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003962
3963 return t_alg;
3964}
3965
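/*
 * Register every template in driver_algs, provided the CAAM controller
 * has probed successfully (its driver data is populated).
 */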
3966static int __init caam_algapi_init(void)
3967{
Ruchika Gupta35af6402014-07-07 10:42:12 +05303968 struct device_node *dev_node;
3969 struct platform_device *pdev;
3970 struct device *ctrldev;
3971 void *priv;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003972 int i = 0, err = 0;
3973
Ruchika Gupta35af6402014-07-07 10:42:12 +05303974 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
3975 if (!dev_node) {
3976 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
3977 if (!dev_node)
3978 return -ENODEV;
3979 }
3980
3981 pdev = of_find_device_by_node(dev_node);
3982 if (!pdev) {
3983 of_node_put(dev_node);
3984 return -ENODEV;
3985 }
3986
3987 ctrldev = &pdev->dev;
3988 priv = dev_get_drvdata(ctrldev);
3989 of_node_put(dev_node);
3990
3991 /*
3992 * If priv is NULL, it's probably because the caam driver wasn't
3993 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
3994 */
3995 if (!priv)
3996 return -ENODEV;
3997
3998
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303999 INIT_LIST_HEAD(&alg_list);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004000
4001 /* register crypto algorithms the device supports */
4002 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4003 /* TODO: check if h/w supports alg */
4004 struct caam_crypto_alg *t_alg;
4005
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304006 t_alg = caam_alg_alloc(&driver_algs[i]);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004007 if (IS_ERR(t_alg)) {
4008 err = PTR_ERR(t_alg);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304009 pr_warn("%s alg allocation failed\n",
4010 driver_algs[i].driver_name);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004011 continue;
4012 }
4013
4014 err = crypto_register_alg(&t_alg->crypto_alg);
4015 if (err) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304016 pr_warn("%s alg registration failed\n",
Kim Phillips8e8ec592011-03-13 16:54:26 +08004017 t_alg->crypto_alg.cra_driver_name);
4018 kfree(t_alg);
Horia Geanta246bbed2013-03-20 16:31:58 +02004019 } else
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304020 list_add_tail(&t_alg->entry, &alg_list);
Kim Phillips8e8ec592011-03-13 16:54:26 +08004021 }
Ruchika Guptacfc6f112013-10-25 12:01:03 +05304022 if (!list_empty(&alg_list))
4023 pr_info("caam algorithms registered in /proc/crypto\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08004024
4025 return err;
4026}
4027
4028module_init(caam_algapi_init);
4029module_exit(caam_algapi_exit);
4030
4031MODULE_LICENSE("GPL");
4032MODULE_DESCRIPTION("FSL CAAM support for crypto API");
4033MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");