/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
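/*
 * i.e. 32 bytes (AES-256) plus 2 * 64 bytes (HMAC-SHA512 ipad and opad
 * digest states) = 160 bytes for the ctx->key buffer below.
 */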
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 19 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
#define DESC_RFC4106_GIVENC_LEN		(DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ)
#define DESC_RFC4543_GIVENC_LEN		(DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_RFC4543_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

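/*
 * Rough sizing sketch (assuming CAAM_CMD_SZ is 4 bytes, i.e. one 32-bit
 * descriptor word): DESC_AEAD_ENC_LEN above is (4 + 15) * 4 = 76 bytes
 * of commands, leaving room in the 64-word (256-byte) descriptor buffer
 * for the job descriptor I/O commands plus an inlined split key and
 * cipher key; the keys_fit_inline tests below redo exactly this
 * arithmetic per shared descriptor.
 */
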
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
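
/*
 * The jump pair above emits, in effect:
 *
 *	if (shared)	OPERATION(decrypt | OP_ALG_AAI_DK);
 *	else		OPERATION(decrypt);
 *
 * i.e. the Decrypt Key (DK) bit is only set on the shared path, where
 * the AES key left loaded by the previous job is already in decrypt
 * (key-schedule) form.
 */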

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

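/*
 * Common shared-descriptor preamble for the authenc algorithms: a
 * serial-share header, then a JUMP that skips the KEY commands when the
 * descriptor is entered with the keys still loaded from a previous job
 * (JUMP_COND_SHRD), so keys are only reloaded when actually needed.
 */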
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/*
	 * NULL encryption; IV is zero
	 * assoclen = (assoclen + cryptlen) - cryptlen
	 */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));
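
	/*
	 * The pair of MOVEs above implements a variable-length move on
	 * parts that lack the MOVE_LEN opcode: a slice of the descriptor
	 * is read into MATH3 and later written back over a target that is
	 * fixed up by set_move_tgt_here() below, once the final command
	 * offsets in the descriptor buffer are known.
	 */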

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy IV from class 1 context to OFIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

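/*
 * GCM has to special-case zero-length payload and zero-length AAD: the
 * data flow through the descriptor differs when either length is zero,
 * so both descriptors below test the lengths (JUMP_COND_MATH_Z) and
 * branch around the corresponding FIFO loads/stores.
 */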
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);
	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump over the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 7);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* if assoclen is ZERO, jump to IV reading - IV is the only input data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* jump to ICV writing */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* read IV - IV is the only input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
			     FIFOLD_TYPE_LAST1);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);

	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* jump to zero-payload command if cryptlen is zero */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	/* if assoclen is ZERO, skip reading assoc data */
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* jump over the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 4);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* if assoclen is ZERO, jump to ICV reading */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

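/*
 * RFC4106 (GCM in ESP): the 4-byte salt stored right after the AES key
 * in ctx->key is loaded as immediate data and concatenated with the
 * 8-byte per-packet IV from the sequence to form the 12-byte GCM IV.
 */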
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
	u32 *desc;
	u32 geniv;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* Read Salt */
	append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
				4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
	/* Read AES-GCM-ESP IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Read Salt */
	append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
				4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
	/* Read AES-GCM-ESP IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* rfc4106_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
			       (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV to OFIFO */
	write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);

	/* Read Salt and generated IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
		   FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, move_cmd);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by generated IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Store generated IV and encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

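/*
 * RFC4543 (AES-GMAC in ESP): the payload is authenticated but not
 * encrypted, so the descriptors below feed it to the class 1 engine as
 * AAD while moving an untouched copy to the output FIFO; the salt and
 * per-packet IV are spliced in as immediate data, as for RFC4106.
 */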
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;
	u32 geniv;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Load AES-GMAC ESP IV into Math1 register */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
		   LDST_CLASS_DECO | tfm->ivsize);

	/* Wait for the DMA transaction to finish */
	append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
		    (1 << JUMP_OFFSET_SHIFT));

	/* Overwrite blank immediate AES-GMAC ESP IV data */
	write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Overwrite blank immediate AAD data */
	write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = (seqinlen - ivsize) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* Read Salt and AES-GMAC ESP IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Will write cryptlen bytes */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Authenticate AES-GMAC ESP IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_AAD | tfm->ivsize);
	set_move_tgt_here(desc, write_aad_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Load AES-GMAC ESP IV into Math1 register */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
		   LDST_CLASS_DECO | tfm->ivsize);

	/* Wait for the DMA transaction to finish */
	append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
		    (1 << JUMP_OFFSET_SHIFT));

	/* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize);

	/* Overwrite blank immediate AES-GMAC ESP IV data */
	write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Overwrite blank immediate AAD data */
	write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read Salt and AES-GMAC ESP IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* Will write cryptlen bytes */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* Authenticate AES-GMAC ESP IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_AAD | tfm->ivsize);
	set_move_tgt_here(desc, write_aad_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* rfc4543_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move generated IV to Math1 register */
	append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 |
		    (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Overwrite blank immediate AES-GMAC IV data */
	write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Overwrite blank immediate AAD data */
	write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Copy generated IV to OFIFO */
	append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO |
		    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read Salt and AES-GMAC generated IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC generated IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Authenticate AES-GMAC IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_AAD | tfm->ivsize);
	set_move_tgt_here(desc, write_aad_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4543 givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
1541
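/*
 * A minimal sketch of the self-patching idiom used by the shared
 * descriptor above (offsets and lengths are illustrative, not taken
 * from a real descriptor): MOVE commands are appended while their
 * target offsets are still unknown, then patched once the final
 * descriptor layout is settled. Not compiled; for exposition only.
 */
#if 0
static void example_desc_self_patch(u32 *desc)
{
	u32 *read_move_cmd, *write_move_cmd;

	/* Append MOVEs whose in-descriptor targets are not yet known */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* ... append the commands the MOVEs must act upon ... */

	/* Patch both MOVEs to point at the current descriptor end */
	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
}
#endif
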
1542static int rfc4543_setauthsize(struct crypto_aead *authenc,
1543 unsigned int authsize)
1544{
1545 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
1546
1547 ctx->authsize = authsize;
1548 rfc4543_set_sh_desc(authenc);
1549
1550 return 0;
1551}
1552
Yuan Kang4c1ec1f2012-06-22 19:48:45 -05001553static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
1554 u32 authkeylen)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001555{
Yuan Kang4c1ec1f2012-06-22 19:48:45 -05001556 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
1557 ctx->split_key_pad_len, key_in, authkeylen,
1558 ctx->alg_op);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001559}
1560
Yuan Kang0e479302011-07-15 11:21:41 +08001561static int aead_setkey(struct crypto_aead *aead,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001562 const u8 *key, unsigned int keylen)
1563{
1564 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
1565 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
1566 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1567 struct device *jrdev = ctx->jrdev;
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001568 struct crypto_authenc_keys keys;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001569 int ret = 0;
1570
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001571 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001572 goto badkey;
1573
1574 /* Pick class 2 key length from algorithm submask */
1575 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1576 OP_ALG_ALGSEL_SHIFT] * 2;
1577 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
1578
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001579 if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1580 goto badkey;
1581
Kim Phillips8e8ec592011-03-13 16:54:26 +08001582#ifdef DEBUG
1583 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001584 keys.authkeylen + keys.enckeylen, keys.enckeylen,
1585 keys.authkeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001586 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
1587 ctx->split_key_len, ctx->split_key_pad_len);
Alex Porosanu514df282013-08-14 18:56:45 +03001588 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001589 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1590#endif
Kim Phillips8e8ec592011-03-13 16:54:26 +08001591
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001592 ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001593 if (ret) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001594 goto badkey;
1595 }
1596
1597	/* append encryption key to auth split key */
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001598 memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001599
Yuan Kang885e9e22011-07-15 11:21:41 +08001600 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001601 keys.enckeylen, DMA_TO_DEVICE);
Yuan Kang885e9e22011-07-15 11:21:41 +08001602 if (dma_mapping_error(jrdev, ctx->key_dma)) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001603 dev_err(jrdev, "unable to map key i/o memory\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08001604 return -ENOMEM;
1605 }
1606#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001607 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001608 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001609 ctx->split_key_pad_len + keys.enckeylen, 1);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001610#endif
1611
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001612 ctx->enckeylen = keys.enckeylen;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001613
Yuan Kang1acebad2011-07-15 11:21:42 +08001614 ret = aead_set_sh_desc(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001615 if (ret) {
Yuan Kang885e9e22011-07-15 11:21:41 +08001616 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001617 keys.enckeylen, DMA_TO_DEVICE);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001618 }
1619
1620 return ret;
1621badkey:
1622 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1623 return -EINVAL;
1624}
1625
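/*
 * Worked example of the split key sizing in aead_setkey() above,
 * assuming hmac(sha256) authentication: the MDHA split key is the
 * ipad/opad pair (twice the digest size), padded to a 16-byte
 * boundary, and the encryption key lands right after the padded
 * split key in ctx->key. Not compiled; for exposition only.
 */
#if 0
	split_key_len = SHA256_DIGEST_SIZE * 2;		/* 2 * 32 = 64 */
	split_key_pad_len = ALIGN(split_key_len, 16);	/* already 64 */
	/* ctx->key layout: [ padded split key | encryption key ] */
#endif
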
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03001626static int gcm_setkey(struct crypto_aead *aead,
1627 const u8 *key, unsigned int keylen)
1628{
1629 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1630 struct device *jrdev = ctx->jrdev;
1631 int ret = 0;
1632
1633#ifdef DEBUG
1634 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1635 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1636#endif
1637
1638 memcpy(ctx->key, key, keylen);
1639 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1640 DMA_TO_DEVICE);
1641 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1642 dev_err(jrdev, "unable to map key i/o memory\n");
1643 return -ENOMEM;
1644 }
1645 ctx->enckeylen = keylen;
1646
1647 ret = gcm_set_sh_desc(aead);
1648 if (ret) {
1649 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1650 DMA_TO_DEVICE);
1651 }
1652
1653 return ret;
1654}
1655
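/*
 * Hypothetical caller-side counterpart of gcm_setkey() above: plain
 * gcm(aes) takes a raw 16/24/32-byte AES key as-is, and the ICV may
 * be trimmed below the full 16 bytes via setauthsize. Not compiled;
 * for exposition only.
 */
#if 0
	crypto_aead_setkey(tfm, key, 16);	/* AES-128 */
	crypto_aead_setauthsize(tfm, 16);	/* full-length ICV */
#endif
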
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001656static int rfc4106_setkey(struct crypto_aead *aead,
1657 const u8 *key, unsigned int keylen)
1658{
1659 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1660 struct device *jrdev = ctx->jrdev;
1661 int ret = 0;
1662
1663 if (keylen < 4)
1664 return -EINVAL;
1665
1666#ifdef DEBUG
1667 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1668 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1669#endif
1670
1671 memcpy(ctx->key, key, keylen);
1672
1673 /*
1674 * The last four bytes of the key material are used as the salt value
1675 * in the nonce. Update the AES key length.
1676 */
1677 ctx->enckeylen = keylen - 4;
1678
1679 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1680 DMA_TO_DEVICE);
1681 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1682 dev_err(jrdev, "unable to map key i/o memory\n");
1683 return -ENOMEM;
1684 }
1685
1686 ret = rfc4106_set_sh_desc(aead);
1687 if (ret) {
1688 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1689 DMA_TO_DEVICE);
1690 }
1691
1692 return ret;
1693}
1694
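/*
 * Sketch of the rfc4106(gcm(aes)) key blob consumed above: for
 * AES-128 the caller passes keylen = 20, where bytes 0..15 are the
 * AES key (so ctx->enckeylen becomes 16) and bytes 16..19 are the
 * nonce salt. The buffer name is illustrative. Not compiled; for
 * exposition only.
 */
#if 0
	u8 rfc4106_key[16 + 4];

	get_random_bytes(rfc4106_key, 16);	/* AES-128 key */
	get_random_bytes(rfc4106_key + 16, 4);	/* 4-byte salt */
	crypto_aead_setkey(tfm, rfc4106_key, sizeof(rfc4106_key));
#endif
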
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001695static int rfc4543_setkey(struct crypto_aead *aead,
1696 const u8 *key, unsigned int keylen)
1697{
1698 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1699 struct device *jrdev = ctx->jrdev;
1700 int ret = 0;
1701
1702 if (keylen < 4)
1703 return -EINVAL;
1704
1705#ifdef DEBUG
1706 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1707 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1708#endif
1709
1710 memcpy(ctx->key, key, keylen);
1711
1712 /*
1713 * The last four bytes of the key material are used as the salt value
1714 * in the nonce. Update the AES key length.
1715 */
1716 ctx->enckeylen = keylen - 4;
1717
1718 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1719 DMA_TO_DEVICE);
1720 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1721 dev_err(jrdev, "unable to map key i/o memory\n");
1722 return -ENOMEM;
1723 }
1724
1725 ret = rfc4543_set_sh_desc(aead);
1726 if (ret) {
1727 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1728 DMA_TO_DEVICE);
1729 }
1730
1731 return ret;
1732}
1733
Yuan Kangacdca312011-07-15 11:21:42 +08001734static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1735 const u8 *key, unsigned int keylen)
1736{
1737 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001738 struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
1739 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
1740 const char *alg_name = crypto_tfm_alg_name(tfm);
Yuan Kangacdca312011-07-15 11:21:42 +08001741 struct device *jrdev = ctx->jrdev;
1742 int ret = 0;
Horia Geanta4464a7d2014-03-14 17:46:49 +02001743 u32 *key_jump_cmd;
Yuan Kangacdca312011-07-15 11:21:42 +08001744 u32 *desc;
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001745 u32 *nonce;
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001746 u32 ctx1_iv_off = 0;
1747 const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
1748 OP_ALG_AAI_CTR_MOD128);
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001749 const bool is_rfc3686 = (ctr_mode &&
1750 (strstr(alg_name, "rfc3686") != NULL));
Yuan Kangacdca312011-07-15 11:21:42 +08001751
1752#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001753 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001754 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1755#endif
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001756 /*
1757 * AES-CTR needs to load IV in CONTEXT1 reg
1758	 * at an offset of 128 bits (16 bytes)
1759 * CONTEXT1[255:128] = IV
1760 */
1761 if (ctr_mode)
1762 ctx1_iv_off = 16;
Yuan Kangacdca312011-07-15 11:21:42 +08001763
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001764 /*
1765 * RFC3686 specific:
1766 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1767 * | *key = {KEY, NONCE}
1768 */
1769 if (is_rfc3686) {
1770 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1771 keylen -= CTR_RFC3686_NONCE_SIZE;
1772 }
1773
Yuan Kangacdca312011-07-15 11:21:42 +08001774 memcpy(ctx->key, key, keylen);
1775 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1776 DMA_TO_DEVICE);
1777 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1778 dev_err(jrdev, "unable to map key i/o memory\n");
1779 return -ENOMEM;
1780 }
1781 ctx->enckeylen = keylen;
1782
1783 /* ablkcipher_encrypt shared descriptor */
1784 desc = ctx->sh_desc_enc;
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001785 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
Yuan Kangacdca312011-07-15 11:21:42 +08001786 /* Skip if already shared */
1787 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1788 JUMP_COND_SHRD);
1789
1790 /* Load class1 key only */
1791 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1792 ctx->enckeylen, CLASS_1 |
1793 KEY_DEST_CLASS_REG);
1794
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001795 /* Load nonce into CONTEXT1 reg */
1796 if (is_rfc3686) {
1797 nonce = (u32 *)(key + keylen);
1798 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1799 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1800 append_move(desc, MOVE_WAITCOMP |
1801 MOVE_SRC_OUTFIFO |
1802 MOVE_DEST_CLASS1CTX |
1803 (16 << MOVE_OFFSET_SHIFT) |
1804 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1805 }
1806
Yuan Kangacdca312011-07-15 11:21:42 +08001807 set_jump_tgt_here(desc, key_jump_cmd);
1808
Yuan Kangacdca312011-07-15 11:21:42 +08001809 /* Load iv */
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001810 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001811 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
Yuan Kangacdca312011-07-15 11:21:42 +08001812
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001813 /* Load counter into CONTEXT1 reg */
1814 if (is_rfc3686)
1815		append_load_imm_u32(desc, cpu_to_be32(1), LDST_IMM |
1816 LDST_CLASS_1_CCB |
1817 LDST_SRCDST_BYTE_CONTEXT |
1818 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1819 LDST_OFFSET_SHIFT));
1820
Yuan Kangacdca312011-07-15 11:21:42 +08001821 /* Load operation */
1822 append_operation(desc, ctx->class1_alg_type |
1823 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1824
1825 /* Perform operation */
1826 ablkcipher_append_src_dst(desc);
1827
1828 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1829 desc_bytes(desc),
1830 DMA_TO_DEVICE);
1831 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1832 dev_err(jrdev, "unable to map shared descriptor\n");
1833 return -ENOMEM;
1834 }
1835#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001836 print_hex_dump(KERN_ERR,
1837 "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001838 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1839 desc_bytes(desc), 1);
1840#endif
1841 /* ablkcipher_decrypt shared descriptor */
1842 desc = ctx->sh_desc_dec;
1843
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001844 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
Yuan Kangacdca312011-07-15 11:21:42 +08001845 /* Skip if already shared */
1846 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1847 JUMP_COND_SHRD);
1848
1849 /* Load class1 key only */
1850 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1851 ctx->enckeylen, CLASS_1 |
1852 KEY_DEST_CLASS_REG);
1853
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001854 /* Load nonce into CONTEXT1 reg */
1855 if (is_rfc3686) {
1856 nonce = (u32 *)(key + keylen);
1857 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1858 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1859 append_move(desc, MOVE_WAITCOMP |
1860 MOVE_SRC_OUTFIFO |
1861 MOVE_DEST_CLASS1CTX |
1862 (16 << MOVE_OFFSET_SHIFT) |
1863 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1864 }
1865
Yuan Kangacdca312011-07-15 11:21:42 +08001866 set_jump_tgt_here(desc, key_jump_cmd);
Yuan Kangacdca312011-07-15 11:21:42 +08001867
1868 /* load IV */
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001869 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001870 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
Yuan Kangacdca312011-07-15 11:21:42 +08001871
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001872 /* Load counter into CONTEXT1 reg */
1873 if (is_rfc3686)
1874		append_load_imm_u32(desc, cpu_to_be32(1), LDST_IMM |
1875 LDST_CLASS_1_CCB |
1876 LDST_SRCDST_BYTE_CONTEXT |
1877 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1878 LDST_OFFSET_SHIFT));
1879
Yuan Kangacdca312011-07-15 11:21:42 +08001880 /* Choose operation */
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001881 if (ctr_mode)
1882 append_operation(desc, ctx->class1_alg_type |
1883 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
1884 else
1885 append_dec_op1(desc, ctx->class1_alg_type);
Yuan Kangacdca312011-07-15 11:21:42 +08001886
1887 /* Perform operation */
1888 ablkcipher_append_src_dst(desc);
1889
Yuan Kangacdca312011-07-15 11:21:42 +08001890 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1891 desc_bytes(desc),
1892 DMA_TO_DEVICE);
Horia Geanta71c65f72014-07-11 15:34:48 +03001893 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
Yuan Kangacdca312011-07-15 11:21:42 +08001894 dev_err(jrdev, "unable to map shared descriptor\n");
1895 return -ENOMEM;
1896 }
1897
1898#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001899 print_hex_dump(KERN_ERR,
1900 "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001901 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1902 desc_bytes(desc), 1);
1903#endif
1904
1905 return ret;
1906}
1907
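/*
 * Sketch of the rfc3686(ctr(aes)) key blob consumed above: the
 * 4-byte nonce trails the AES key, and once the shared descriptor
 * has run its LOAD/MOVE sequence, CONTEXT1[255:128] holds
 * { nonce[4] | IV[8] | counter[4] } with the counter preloaded to 1.
 * The buffer name is illustrative. Not compiled; for exposition only.
 */
#if 0
	u8 rfc3686_key[16 + CTR_RFC3686_NONCE_SIZE];

	get_random_bytes(rfc3686_key, sizeof(rfc3686_key));
	crypto_ablkcipher_setkey(tfm, rfc3686_key, sizeof(rfc3686_key));
#endif
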
Kim Phillips8e8ec592011-03-13 16:54:26 +08001908/*
Yuan Kang1acebad2011-07-15 11:21:42 +08001909 * aead_edesc - s/w-extended aead descriptor
1910 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001911 * @assoc_chained: if source is chained
Kim Phillips8e8ec592011-03-13 16:54:26 +08001912 * @src_nents: number of segments in input scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001913 * @src_chained: if source is chained
Kim Phillips8e8ec592011-03-13 16:54:26 +08001914 * @dst_nents: number of segments in output scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001915 * @dst_chained: if destination is chained
Yuan Kang1acebad2011-07-15 11:21:42 +08001916 * @iv_dma: dma address of iv for checking continuity and link table
Kim Phillips8e8ec592011-03-13 16:54:26 +08001917 * @sec4_sg: pointer to the h/w link table
Yuan Kanga299c832012-06-22 19:48:46 -05001918 * @sec4_sg_bytes: length of dma mapped sec4_sg space
1919 * @sec4_sg_dma: bus physical mapped address of h/w link table
Kim Phillips8e8ec592011-03-13 16:54:26 +08001920 * @hw_desc: the h/w job descriptor (must not exceed MAX_CAAM_DESCSIZE) followed by any referenced link tables
1921 */
Yuan Kang0e479302011-07-15 11:21:41 +08001922struct aead_edesc {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001923 int assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001924 bool assoc_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001925 int src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001926 bool src_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001927 int dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001928 bool dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08001929 dma_addr_t iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001930 int sec4_sg_bytes;
1931 dma_addr_t sec4_sg_dma;
1932 struct sec4_sg_entry *sec4_sg;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001933 u32 hw_desc[0];
1934};
1935
Yuan Kangacdca312011-07-15 11:21:42 +08001936/*
1937 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
1938 * @src_nents: number of segments in input scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001939 * @src_chained: if source is chained
Yuan Kangacdca312011-07-15 11:21:42 +08001940 * @dst_nents: number of segments in output scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001941 * @dst_chained: if destination is chained
Yuan Kangacdca312011-07-15 11:21:42 +08001942 * @iv_dma: dma address of iv for checking continuity and link table
1943 * @sec4_sg: pointer to the h/w link table
Yuan Kanga299c832012-06-22 19:48:46 -05001944 * @sec4_sg_bytes: length of dma mapped sec4_sg space
1945 * @sec4_sg_dma: bus physical mapped address of h/w link table
Yuan Kangacdca312011-07-15 11:21:42 +08001946 * @hw_desc: the h/w job descriptor (must not exceed MAX_CAAM_DESCSIZE) followed by any referenced link tables
1947 */
1948struct ablkcipher_edesc {
1949 int src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001950 bool src_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08001951 int dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001952 bool dst_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08001953 dma_addr_t iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001954 int sec4_sg_bytes;
1955 dma_addr_t sec4_sg_dma;
1956 struct sec4_sg_entry *sec4_sg;
Yuan Kangacdca312011-07-15 11:21:42 +08001957 u32 hw_desc[0];
1958};
1959
Yuan Kang1acebad2011-07-15 11:21:42 +08001960static void caam_unmap(struct device *dev, struct scatterlist *src,
Yuan Kang643b39b2012-06-22 19:48:49 -05001961 struct scatterlist *dst, int src_nents,
1962 bool src_chained, int dst_nents, bool dst_chained,
Yuan Kanga299c832012-06-22 19:48:46 -05001963 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1964 int sec4_sg_bytes)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001965{
Yuan Kang643b39b2012-06-22 19:48:49 -05001966 if (dst != src) {
1967 dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
1968 src_chained);
1969 dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
1970 dst_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001971 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05001972 dma_unmap_sg_chained(dev, src, src_nents ? : 1,
1973 DMA_BIDIRECTIONAL, src_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001974 }
1975
Yuan Kang1acebad2011-07-15 11:21:42 +08001976 if (iv_dma)
1977 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
Yuan Kanga299c832012-06-22 19:48:46 -05001978 if (sec4_sg_bytes)
1979 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001980 DMA_TO_DEVICE);
1981}
1982
Yuan Kang1acebad2011-07-15 11:21:42 +08001983static void aead_unmap(struct device *dev,
1984 struct aead_edesc *edesc,
1985 struct aead_request *req)
1986{
1987 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1988 int ivsize = crypto_aead_ivsize(aead);
1989
Yuan Kang643b39b2012-06-22 19:48:49 -05001990 dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
1991 DMA_TO_DEVICE, edesc->assoc_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08001992
1993 caam_unmap(dev, req->src, req->dst,
Yuan Kang643b39b2012-06-22 19:48:49 -05001994 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
1995 edesc->dst_chained, edesc->iv_dma, ivsize,
1996 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
Yuan Kang1acebad2011-07-15 11:21:42 +08001997}
1998
Yuan Kangacdca312011-07-15 11:21:42 +08001999static void ablkcipher_unmap(struct device *dev,
2000 struct ablkcipher_edesc *edesc,
2001 struct ablkcipher_request *req)
2002{
2003 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2004 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2005
2006 caam_unmap(dev, req->src, req->dst,
Yuan Kang643b39b2012-06-22 19:48:49 -05002007 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
2008 edesc->dst_chained, edesc->iv_dma, ivsize,
2009 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
Yuan Kangacdca312011-07-15 11:21:42 +08002010}
2011
Yuan Kang0e479302011-07-15 11:21:41 +08002012static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002013 void *context)
2014{
Yuan Kang0e479302011-07-15 11:21:41 +08002015 struct aead_request *req = context;
2016 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002017#ifdef DEBUG
Yuan Kang0e479302011-07-15 11:21:41 +08002018 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002019 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Yuan Kang1acebad2011-07-15 11:21:42 +08002020 int ivsize = crypto_aead_ivsize(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002021
2022 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2023#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08002024
Yuan Kang0e479302011-07-15 11:21:41 +08002025 edesc = (struct aead_edesc *)((char *)desc -
2026 offsetof(struct aead_edesc, hw_desc));
Kim Phillips8e8ec592011-03-13 16:54:26 +08002027
Marek Vasutfa9659c2014-04-24 20:05:12 +02002028 if (err)
2029 caam_jr_strstatus(jrdev, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002030
Yuan Kang0e479302011-07-15 11:21:41 +08002031 aead_unmap(jrdev, edesc, req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002032
2033#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002034 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08002035 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
2036		       req->assoclen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002037 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08002038 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002039 edesc->src_nents ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002040 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08002041 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2042 edesc->src_nents ? 100 : req->cryptlen +
Kim Phillips8e8ec592011-03-13 16:54:26 +08002043 ctx->authsize + 4, 1);
2044#endif
2045
2046 kfree(edesc);
2047
Yuan Kang0e479302011-07-15 11:21:41 +08002048 aead_request_complete(req, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002049}
2050
Yuan Kang0e479302011-07-15 11:21:41 +08002051static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002052 void *context)
2053{
Yuan Kang0e479302011-07-15 11:21:41 +08002054 struct aead_request *req = context;
2055 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002056#ifdef DEBUG
Yuan Kang0e479302011-07-15 11:21:41 +08002057 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002058 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Yuan Kang1acebad2011-07-15 11:21:42 +08002059 int ivsize = crypto_aead_ivsize(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002060
2061 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2062#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08002063
Yuan Kang0e479302011-07-15 11:21:41 +08002064 edesc = (struct aead_edesc *)((char *)desc -
2065 offsetof(struct aead_edesc, hw_desc));
Kim Phillips8e8ec592011-03-13 16:54:26 +08002066
Yuan Kang1acebad2011-07-15 11:21:42 +08002067#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002068 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002069 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
2070 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002071 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002072 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
Horia Geantabbf9c892013-11-28 15:11:16 +02002073 req->cryptlen - ctx->authsize, 1);
Yuan Kang1acebad2011-07-15 11:21:42 +08002074#endif
2075
Marek Vasutfa9659c2014-04-24 20:05:12 +02002076 if (err)
2077 caam_jr_strstatus(jrdev, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002078
Yuan Kang0e479302011-07-15 11:21:41 +08002079 aead_unmap(jrdev, edesc, req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002080
2081 /*
2082	 * verify that the h/w ICV check passed, else return -EBADMSG
2083 */
2084 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
2085 err = -EBADMSG;
2086
2087#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002088 print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08002089 DUMP_PREFIX_ADDRESS, 16, 4,
Yuan Kang0e479302011-07-15 11:21:41 +08002090 ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
2091 sizeof(struct iphdr) + req->assoclen +
2092 ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
Kim Phillips8e8ec592011-03-13 16:54:26 +08002093 ctx->authsize + 36, 1);
Yuan Kanga299c832012-06-22 19:48:46 -05002094 if (!err && edesc->sec4_sg_bytes) {
Yuan Kang0e479302011-07-15 11:21:41 +08002095 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
Alex Porosanu514df282013-08-14 18:56:45 +03002096 print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08002097 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
2098 sg->length + ctx->authsize + 16, 1);
2099 }
2100#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08002101
Kim Phillips8e8ec592011-03-13 16:54:26 +08002102 kfree(edesc);
2103
Yuan Kang0e479302011-07-15 11:21:41 +08002104 aead_request_complete(req, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002105}
2106
Yuan Kangacdca312011-07-15 11:21:42 +08002107static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
2108 void *context)
2109{
2110 struct ablkcipher_request *req = context;
2111 struct ablkcipher_edesc *edesc;
2112#ifdef DEBUG
2113 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2114 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2115
2116 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2117#endif
2118
2119 edesc = (struct ablkcipher_edesc *)((char *)desc -
2120 offsetof(struct ablkcipher_edesc, hw_desc));
2121
Marek Vasutfa9659c2014-04-24 20:05:12 +02002122 if (err)
2123 caam_jr_strstatus(jrdev, err);
Yuan Kangacdca312011-07-15 11:21:42 +08002124
2125#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002126 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002127 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2128 edesc->src_nents > 1 ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002129 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002130 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2131 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2132#endif
2133
2134 ablkcipher_unmap(jrdev, edesc, req);
2135 kfree(edesc);
2136
2137 ablkcipher_request_complete(req, err);
2138}
2139
2140static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
2141 void *context)
2142{
2143 struct ablkcipher_request *req = context;
2144 struct ablkcipher_edesc *edesc;
2145#ifdef DEBUG
2146 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2147 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2148
2149 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2150#endif
2151
2152 edesc = (struct ablkcipher_edesc *)((char *)desc -
2153 offsetof(struct ablkcipher_edesc, hw_desc));
Marek Vasutfa9659c2014-04-24 20:05:12 +02002154 if (err)
2155 caam_jr_strstatus(jrdev, err);
Yuan Kangacdca312011-07-15 11:21:42 +08002156
2157#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002158 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002159 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2160 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002161 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002162 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2163 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2164#endif
2165
2166 ablkcipher_unmap(jrdev, edesc, req);
2167 kfree(edesc);
2168
2169 ablkcipher_request_complete(req, err);
2170}
2171
Kim Phillips8e8ec592011-03-13 16:54:26 +08002172/*
Yuan Kang1acebad2011-07-15 11:21:42 +08002173 * Fill in aead job descriptor
Kim Phillips8e8ec592011-03-13 16:54:26 +08002174 */
Yuan Kang1acebad2011-07-15 11:21:42 +08002175static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
2176 struct aead_edesc *edesc,
2177 struct aead_request *req,
2178 bool all_contig, bool encrypt)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002179{
Yuan Kang0e479302011-07-15 11:21:41 +08002180 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002181 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002182 int ivsize = crypto_aead_ivsize(aead);
2183 int authsize = ctx->authsize;
Yuan Kang1acebad2011-07-15 11:21:42 +08002184 u32 *desc = edesc->hw_desc;
2185 u32 out_options = 0, in_options;
2186 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002187 int len, sec4_sg_index = 0;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002188 bool is_gcm = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002189
Yuan Kang1acebad2011-07-15 11:21:42 +08002190#ifdef DEBUG
Kim Phillips8e8ec592011-03-13 16:54:26 +08002191 debug("assoclen %d cryptlen %d authsize %d\n",
Yuan Kang0e479302011-07-15 11:21:41 +08002192 req->assoclen, req->cryptlen, authsize);
Alex Porosanu514df282013-08-14 18:56:45 +03002193 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08002194 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
2195		       req->assoclen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002196 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002197 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002198 edesc->src_nents ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002199 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08002200 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
Yuan Kang1acebad2011-07-15 11:21:42 +08002201 edesc->src_nents ? 100 : req->cryptlen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002202 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08002203 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
2204 desc_bytes(sh_desc), 1);
2205#endif
Yuan Kang1acebad2011-07-15 11:21:42 +08002206
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002207 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2208 OP_ALG_ALGSEL_AES) &&
2209 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2210 is_gcm = true;
2211
Yuan Kang1acebad2011-07-15 11:21:42 +08002212 len = desc_len(sh_desc);
2213 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2214
2215 if (all_contig) {
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002216 if (is_gcm)
2217 src_dma = edesc->iv_dma;
2218 else
2219 src_dma = sg_dma_address(req->assoc);
Yuan Kang1acebad2011-07-15 11:21:42 +08002220 in_options = 0;
2221 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002222 src_dma = edesc->sec4_sg_dma;
2223 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
2224 (edesc->src_nents ? : 1);
Yuan Kang1acebad2011-07-15 11:21:42 +08002225 in_options = LDST_SGF;
2226 }
Horia Geantabbf9c892013-11-28 15:11:16 +02002227
2228 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
2229 in_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002230
Yuan Kang1acebad2011-07-15 11:21:42 +08002231 if (likely(req->src == req->dst)) {
2232 if (all_contig) {
2233 dst_dma = sg_dma_address(req->src);
2234 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002235 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
Yuan Kang1acebad2011-07-15 11:21:42 +08002236 ((edesc->assoc_nents ? : 1) + 1);
2237 out_options = LDST_SGF;
2238 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002239 } else {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002240 if (!edesc->dst_nents) {
Yuan Kang0e479302011-07-15 11:21:41 +08002241 dst_dma = sg_dma_address(req->dst);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002242 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002243 dst_dma = edesc->sec4_sg_dma +
2244 sec4_sg_index *
2245 sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08002246 out_options = LDST_SGF;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002247 }
2248 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002249 if (encrypt)
Horia Geantabbf9c892013-11-28 15:11:16 +02002250 append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
2251 out_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002252 else
Yuan Kang1acebad2011-07-15 11:21:42 +08002253 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
2254 out_options);
2255}
2256
2257/*
2258 * Fill in aead givencrypt job descriptor
2259 */
2260static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
2261 struct aead_edesc *edesc,
2262 struct aead_request *req,
2263 int contig)
2264{
2265 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2266 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2267 int ivsize = crypto_aead_ivsize(aead);
2268 int authsize = ctx->authsize;
2269 u32 *desc = edesc->hw_desc;
2270 u32 out_options = 0, in_options;
2271 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002272 int len, sec4_sg_index = 0;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002273 bool is_gcm = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002274
2275#ifdef DEBUG
Yuan Kang1acebad2011-07-15 11:21:42 +08002276 debug("assoclen %d cryptlen %d authsize %d\n",
2277 req->assoclen, req->cryptlen, authsize);
Alex Porosanu514df282013-08-14 18:56:45 +03002278 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002279 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
2280		       req->assoclen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002281 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002282 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002283 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002284 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2285 edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002286 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002287 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
2288 desc_bytes(sh_desc), 1);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002289#endif
2290
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002291 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2292 OP_ALG_ALGSEL_AES) &&
2293 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2294 is_gcm = true;
2295
Yuan Kang1acebad2011-07-15 11:21:42 +08002296 len = desc_len(sh_desc);
2297 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2298
2299 if (contig & GIV_SRC_CONTIG) {
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002300 if (is_gcm)
2301 src_dma = edesc->iv_dma;
2302 else
2303 src_dma = sg_dma_address(req->assoc);
Yuan Kang1acebad2011-07-15 11:21:42 +08002304 in_options = 0;
2305 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002306 src_dma = edesc->sec4_sg_dma;
2307 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08002308 in_options = LDST_SGF;
2309 }
Horia Geantabbf9c892013-11-28 15:11:16 +02002310 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
2311 in_options);
Yuan Kang1acebad2011-07-15 11:21:42 +08002312
2313 if (contig & GIV_DST_CONTIG) {
2314 dst_dma = edesc->iv_dma;
2315 } else {
2316 if (likely(req->src == req->dst)) {
Yuan Kanga299c832012-06-22 19:48:46 -05002317 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002318 (edesc->assoc_nents +
2319 (is_gcm ? 1 + edesc->src_nents : 0));
Yuan Kang1acebad2011-07-15 11:21:42 +08002320 out_options = LDST_SGF;
2321 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002322 dst_dma = edesc->sec4_sg_dma +
2323 sec4_sg_index *
2324 sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08002325 out_options = LDST_SGF;
2326 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002327 }
2328
Horia Geantabbf9c892013-11-28 15:11:16 +02002329 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
2330 out_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002331}
2332
2333/*
Yuan Kangacdca312011-07-15 11:21:42 +08002334 * Fill in ablkcipher job descriptor
2335 */
2336static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2337 struct ablkcipher_edesc *edesc,
2338 struct ablkcipher_request *req,
2339 bool iv_contig)
2340{
2341 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2342 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2343 u32 *desc = edesc->hw_desc;
2344 u32 out_options = 0, in_options;
2345 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002346 int len, sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08002347
2348#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002349 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002350 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2351 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002352 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002353 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2354 edesc->src_nents ? 100 : req->nbytes, 1);
2355#endif
2356
2357 len = desc_len(sh_desc);
2358 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2359
2360 if (iv_contig) {
2361 src_dma = edesc->iv_dma;
2362 in_options = 0;
2363 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002364 src_dma = edesc->sec4_sg_dma;
2365 sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
Yuan Kangacdca312011-07-15 11:21:42 +08002366 in_options = LDST_SGF;
2367 }
2368 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2369
2370 if (likely(req->src == req->dst)) {
2371 if (!edesc->src_nents && iv_contig) {
2372 dst_dma = sg_dma_address(req->src);
2373 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002374 dst_dma = edesc->sec4_sg_dma +
2375 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002376 out_options = LDST_SGF;
2377 }
2378 } else {
2379 if (!edesc->dst_nents) {
2380 dst_dma = sg_dma_address(req->dst);
2381 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002382 dst_dma = edesc->sec4_sg_dma +
2383 sec4_sg_index * sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002384 out_options = LDST_SGF;
2385 }
2386 }
2387 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2388}
2389
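/*
 * The iv_contig handling above reduces to one choice per SEQ
 * pointer: reference flat data directly, or set LDST_SGF so the
 * pointer is interpreted as a sec4 link table. A minimal sketch of
 * the two forms (not compiled; for exposition only):
 */
#if 0
	/* contiguous buffer: DMA address of the data itself */
	append_seq_in_ptr(desc, buf_dma, len, 0);

	/* scattered buffer: DMA address of the link table */
	append_seq_in_ptr(desc, edesc->sec4_sg_dma, len, LDST_SGF);
#endif
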
2390/*
Yuan Kang1acebad2011-07-15 11:21:42 +08002391 * allocate and map the aead extended descriptor
Kim Phillips8e8ec592011-03-13 16:54:26 +08002392 */
Yuan Kang0e479302011-07-15 11:21:41 +08002393static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
Horia Geantabbf9c892013-11-28 15:11:16 +02002394 int desc_bytes, bool *all_contig_ptr,
2395 bool encrypt)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002396{
Yuan Kang0e479302011-07-15 11:21:41 +08002397 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002398 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2399 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002400 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2401 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2402 int assoc_nents, src_nents, dst_nents = 0;
Yuan Kang0e479302011-07-15 11:21:41 +08002403 struct aead_edesc *edesc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002404 dma_addr_t iv_dma = 0;
2405 int sgc;
2406 bool all_contig = true;
Yuan Kang643b39b2012-06-22 19:48:49 -05002407 bool assoc_chained = false, src_chained = false, dst_chained = false;
Yuan Kang1acebad2011-07-15 11:21:42 +08002408 int ivsize = crypto_aead_ivsize(aead);
Yuan Kanga299c832012-06-22 19:48:46 -05002409 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
Horia Geantabbf9c892013-11-28 15:11:16 +02002410 unsigned int authsize = ctx->authsize;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002411 bool is_gcm = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002412
Yuan Kang643b39b2012-06-22 19:48:49 -05002413 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002414
Horia Geantabbf9c892013-11-28 15:11:16 +02002415 if (unlikely(req->dst != req->src)) {
2416 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
2417 dst_nents = sg_count(req->dst,
2418 req->cryptlen +
2419 (encrypt ? authsize : (-authsize)),
2420 &dst_chained);
2421 } else {
2422 src_nents = sg_count(req->src,
2423 req->cryptlen +
2424 (encrypt ? authsize : 0),
2425 &src_chained);
2426 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002427
Yuan Kang643b39b2012-06-22 19:48:49 -05002428 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
Horia Geanta286233e2013-05-10 15:08:39 +03002429 DMA_TO_DEVICE, assoc_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002430 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05002431 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2432 DMA_BIDIRECTIONAL, src_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002433 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05002434 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2435 DMA_TO_DEVICE, src_chained);
2436 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2437 DMA_FROM_DEVICE, dst_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002438 }
2439
Yuan Kang1acebad2011-07-15 11:21:42 +08002440 iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002441 if (dma_mapping_error(jrdev, iv_dma)) {
2442 dev_err(jrdev, "unable to map IV\n");
2443 return ERR_PTR(-ENOMEM);
2444 }
2445
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002446 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2447 OP_ALG_ALGSEL_AES) &&
2448 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2449 is_gcm = true;
2450
2451 /*
2452 * Check if data are contiguous.
2453 * GCM expected input sequence: IV, AAD, text
2454	 * All other modes - expected input sequence: AAD, IV, text
2455 */
2456 if (is_gcm)
2457 all_contig = (!assoc_nents &&
2458 iv_dma + ivsize == sg_dma_address(req->assoc) &&
2459 !src_nents && sg_dma_address(req->assoc) +
2460 req->assoclen == sg_dma_address(req->src));
2461 else
2462 all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
2463 req->assoclen == iv_dma && !src_nents &&
2464 iv_dma + ivsize == sg_dma_address(req->src));
2465 if (!all_contig) {
Yuan Kang1acebad2011-07-15 11:21:42 +08002466 assoc_nents = assoc_nents ? : 1;
2467 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05002468 sec4_sg_len = assoc_nents + 1 + src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08002469 }
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002470
Yuan Kanga299c832012-06-22 19:48:46 -05002471 sec4_sg_len += dst_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08002472
Yuan Kanga299c832012-06-22 19:48:46 -05002473 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002474
2475 /* allocate space for base edesc and hw desc commands, link tables */
Yuan Kang0e479302011-07-15 11:21:41 +08002476 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05002477 sec4_sg_bytes, GFP_DMA | flags);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002478 if (!edesc) {
2479 dev_err(jrdev, "could not allocate extended descriptor\n");
2480 return ERR_PTR(-ENOMEM);
2481 }
2482
2483 edesc->assoc_nents = assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002484 edesc->assoc_chained = assoc_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002485 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002486 edesc->src_chained = src_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002487 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002488 edesc->dst_chained = dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002489 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002490 edesc->sec4_sg_bytes = sec4_sg_bytes;
2491 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2492 desc_bytes;
Yuan Kang1acebad2011-07-15 11:21:42 +08002493 *all_contig_ptr = all_contig;
2494
Yuan Kanga299c832012-06-22 19:48:46 -05002495 sec4_sg_index = 0;
Yuan Kang1acebad2011-07-15 11:21:42 +08002496 if (!all_contig) {
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002497 if (!is_gcm) {
2498 sg_to_sec4_sg(req->assoc,
2499 (assoc_nents ? : 1),
2500 edesc->sec4_sg +
2501 sec4_sg_index, 0);
2502 sec4_sg_index += assoc_nents ? : 1;
2503 }
2504
Yuan Kanga299c832012-06-22 19:48:46 -05002505 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08002506 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05002507 sec4_sg_index += 1;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002508
2509 if (is_gcm) {
2510 sg_to_sec4_sg(req->assoc,
2511 (assoc_nents ? : 1),
2512 edesc->sec4_sg +
2513 sec4_sg_index, 0);
2514 sec4_sg_index += assoc_nents ? : 1;
2515 }
2516
Yuan Kanga299c832012-06-22 19:48:46 -05002517 sg_to_sec4_sg_last(req->src,
2518 (src_nents ? : 1),
2519 edesc->sec4_sg +
2520 sec4_sg_index, 0);
2521 sec4_sg_index += src_nents ? : 1;
Yuan Kang1acebad2011-07-15 11:21:42 +08002522 }
2523 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05002524 sg_to_sec4_sg_last(req->dst, dst_nents,
2525 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad2011-07-15 11:21:42 +08002526 }
Ruchika Gupta1da2be32014-06-23 19:50:26 +05302527 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2528 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002529 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2530 dev_err(jrdev, "unable to map S/G table\n");
2531 return ERR_PTR(-ENOMEM);
2532 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002533
2534 return edesc;
2535}
2536
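/*
 * Resulting link table layout for the non-contiguous GCM case built
 * above, mirroring the "IV, AAD, text" input sequence (entry counts
 * are illustrative):
 *
 *	sec4_sg[0]		IV (dma_to_sec4_sg_one)
 *	sec4_sg[1..a]		req->assoc segments
 *	sec4_sg[a+1..a+s]	req->src segments, the last one flagged
 *				final by sg_to_sec4_sg_last
 *	sec4_sg[...]		req->dst segments, when dst != src
 */
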
Yuan Kang0e479302011-07-15 11:21:41 +08002537static int aead_encrypt(struct aead_request *req)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002538{
Yuan Kang0e479302011-07-15 11:21:41 +08002539 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002540 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002541 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2542 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002543 bool all_contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002544 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002545 int ret = 0;
2546
Kim Phillips8e8ec592011-03-13 16:54:26 +08002547 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08002548 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
Horia Geantabbf9c892013-11-28 15:11:16 +02002549 CAAM_CMD_SZ, &all_contig, true);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002550 if (IS_ERR(edesc))
2551 return PTR_ERR(edesc);
2552
Yuan Kang1acebad2011-07-15 11:21:42 +08002553 /* Create and submit job descriptor */
2554 init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
2555 all_contig, true);
2556#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002557 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002558 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2559 desc_bytes(edesc->hw_desc), 1);
2560#endif
2561
Kim Phillips8e8ec592011-03-13 16:54:26 +08002562 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002563 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2564 if (!ret) {
2565 ret = -EINPROGRESS;
2566 } else {
2567 aead_unmap(jrdev, edesc, req);
2568 kfree(edesc);
2569 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002570
Yuan Kang1acebad2011-07-15 11:21:42 +08002571 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002572}
2573
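/*
 * Minimal sketch of driving this encrypt path through the old-style
 * kernel AEAD API of this era; scatterlist setup, error handling and
 * the completion callback (example_done_cb is hypothetical) are
 * elided, and the algorithm name assumes the authenc template is
 * available. Not compiled; for exposition only.
 */
#if 0
	struct crypto_aead *tfm;
	struct aead_request *req;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	crypto_aead_setkey(tfm, key, keylen);
	crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  example_done_cb, NULL);
	aead_request_set_assoc(req, &assoc_sg, assoclen);
	aead_request_set_crypt(req, &src_sg, &dst_sg, cryptlen, iv);

	/* returns -EINPROGRESS once the job reaches a job ring */
	crypto_aead_encrypt(req);
#endif
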
Yuan Kang0e479302011-07-15 11:21:41 +08002574static int aead_decrypt(struct aead_request *req)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002575{
Yuan Kang1acebad2011-07-15 11:21:42 +08002576 struct aead_edesc *edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08002577 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Yuan Kang0e479302011-07-15 11:21:41 +08002578 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2579 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002580 bool all_contig;
Yuan Kang0e479302011-07-15 11:21:41 +08002581 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002582 int ret = 0;
Yuan Kang0e479302011-07-15 11:21:41 +08002583
2584 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08002585 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
Horia Geantabbf9c892013-11-28 15:11:16 +02002586 CAAM_CMD_SZ, &all_contig, false);
Yuan Kang0e479302011-07-15 11:21:41 +08002587 if (IS_ERR(edesc))
2588 return PTR_ERR(edesc);
2589
Yuan Kang1acebad2011-07-15 11:21:42 +08002590#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002591 print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002592 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2593 req->cryptlen, 1);
2594#endif
2595
2596	/* Create and submit job descriptor */
2597 init_aead_job(ctx->sh_desc_dec,
2598 ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
2599#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002600 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002601 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2602 desc_bytes(edesc->hw_desc), 1);
2603#endif
2604
Yuan Kang0e479302011-07-15 11:21:41 +08002605 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002606 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2607 if (!ret) {
2608 ret = -EINPROGRESS;
2609 } else {
2610 aead_unmap(jrdev, edesc, req);
2611 kfree(edesc);
2612 }
Yuan Kang0e479302011-07-15 11:21:41 +08002613
Yuan Kang1acebad2011-07-15 11:21:42 +08002614 return ret;
2615}
Yuan Kang0e479302011-07-15 11:21:41 +08002616
Yuan Kang1acebad2011-07-15 11:21:42 +08002617/*
2618 * allocate and map the aead extended descriptor for aead givencrypt
2619 */
2620static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
2621 *greq, int desc_bytes,
2622 u32 *contig_ptr)
2623{
2624 struct aead_request *req = &greq->areq;
2625 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2626 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2627 struct device *jrdev = ctx->jrdev;
2628 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2629 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2630 int assoc_nents, src_nents, dst_nents = 0;
2631 struct aead_edesc *edesc;
2632 dma_addr_t iv_dma = 0;
2633 int sgc;
2634 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
2635 int ivsize = crypto_aead_ivsize(aead);
Yuan Kang643b39b2012-06-22 19:48:49 -05002636 bool assoc_chained = false, src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05002637 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002638 bool is_gcm = false;
Yuan Kang0e479302011-07-15 11:21:41 +08002639
Yuan Kang643b39b2012-06-22 19:48:49 -05002640 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
2641 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
Yuan Kang0e479302011-07-15 11:21:41 +08002642
Yuan Kang1acebad2011-07-15 11:21:42 +08002643 if (unlikely(req->dst != req->src))
Horia Geantabbf9c892013-11-28 15:11:16 +02002644 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
2645 &dst_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002646
Yuan Kang643b39b2012-06-22 19:48:49 -05002647 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
Horia Geanta286233e2013-05-10 15:08:39 +03002648 DMA_TO_DEVICE, assoc_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002649 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05002650 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2651 DMA_BIDIRECTIONAL, src_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002652 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05002653 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2654 DMA_TO_DEVICE, src_chained);
2655 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2656 DMA_FROM_DEVICE, dst_chained);
Yuan Kang1acebad2011-07-15 11:21:42 +08002657 }
2658
Yuan Kang1acebad2011-07-15 11:21:42 +08002659 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002660 if (dma_mapping_error(jrdev, iv_dma)) {
2661 dev_err(jrdev, "unable to map IV\n");
2662 return ERR_PTR(-ENOMEM);
2663 }
2664
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002665 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2666 OP_ALG_ALGSEL_AES) &&
2667 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2668 is_gcm = true;
2669
2670 /*
2671 * Check if data are contiguous.
2672 * GCM expected input sequence: IV, AAD, text
2673	 * All other modes - expected input sequence: AAD, IV, text
2674 */
2675
2676 if (is_gcm) {
2677 if (assoc_nents || iv_dma + ivsize !=
2678 sg_dma_address(req->assoc) || src_nents ||
2679 sg_dma_address(req->assoc) + req->assoclen !=
2680 sg_dma_address(req->src))
2681 contig &= ~GIV_SRC_CONTIG;
2682 } else {
2683 if (assoc_nents ||
2684 sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
2685 src_nents || iv_dma + ivsize != sg_dma_address(req->src))
2686 contig &= ~GIV_SRC_CONTIG;
2687 }
2688
Yuan Kang1acebad2011-07-15 11:21:42 +08002689 if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
2690 contig &= ~GIV_DST_CONTIG;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002691
Yuan Kang1acebad2011-07-15 11:21:42 +08002692 if (!(contig & GIV_SRC_CONTIG)) {
2693 assoc_nents = assoc_nents ? : 1;
2694 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05002695 sec4_sg_len += assoc_nents + 1 + src_nents;
Tudor Ambarus19167bf2014-10-24 18:13:37 +03002696 if (req->src == req->dst &&
2697 (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
Yuan Kang1acebad2011-07-15 11:21:42 +08002698 contig &= ~GIV_DST_CONTIG;
2699 }
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002700
2701 /*
2702 * Add new sg entries for GCM output sequence.
2703 * Expected output sequence: IV, encrypted text.
2704 */
2705 if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
2706 sec4_sg_len += 1 + src_nents;
2707
2708 if (unlikely(req->src != req->dst)) {
2709 dst_nents = dst_nents ? : 1;
2710 sec4_sg_len += 1 + dst_nents;
2711 }
Yuan Kang1acebad2011-07-15 11:21:42 +08002712
Yuan Kanga299c832012-06-22 19:48:46 -05002713 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
Yuan Kang1acebad2011-07-15 11:21:42 +08002714
2715 /* allocate space for base edesc and hw desc commands, link tables */
2716 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05002717 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kang1acebad2011-07-15 11:21:42 +08002718 if (!edesc) {
2719 dev_err(jrdev, "could not allocate extended descriptor\n");
2720 return ERR_PTR(-ENOMEM);
2721 }
2722
2723 edesc->assoc_nents = assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002724 edesc->assoc_chained = assoc_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002725 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002726 edesc->src_chained = src_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002727 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002728 edesc->dst_chained = dst_chained;
Yuan Kang1acebad2011-07-15 11:21:42 +08002729 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002730 edesc->sec4_sg_bytes = sec4_sg_bytes;
2731 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2732 desc_bytes;
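	/*
	 * Layout of the single GFP_DMA allocation made above (sketch):
	 *
	 *   [ struct aead_edesc | hw desc (desc_bytes) | sec4 S/G table ]
	 *                                                ^ edesc->sec4_sg
	 */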
Yuan Kang1acebad2011-07-15 11:21:42 +08002733 *contig_ptr = contig;
2734
Yuan Kanga299c832012-06-22 19:48:46 -05002735 sec4_sg_index = 0;
Yuan Kang1acebad2011-07-15 11:21:42 +08002736 if (!(contig & GIV_SRC_CONTIG)) {
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002737 if (!is_gcm) {
2738 sg_to_sec4_sg(req->assoc, assoc_nents,
2739 edesc->sec4_sg + sec4_sg_index, 0);
2740 sec4_sg_index += assoc_nents;
2741 }
2742
Yuan Kanga299c832012-06-22 19:48:46 -05002743 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08002744 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05002745 sec4_sg_index += 1;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002746
2747 if (is_gcm) {
2748 sg_to_sec4_sg(req->assoc, assoc_nents,
2749 edesc->sec4_sg + sec4_sg_index, 0);
2750 sec4_sg_index += assoc_nents;
2751 }
2752
Yuan Kanga299c832012-06-22 19:48:46 -05002753 sg_to_sec4_sg_last(req->src, src_nents,
2754 edesc->sec4_sg +
2755 sec4_sg_index, 0);
2756 sec4_sg_index += src_nents;
Yuan Kang1acebad2011-07-15 11:21:42 +08002757 }
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002758
2759 if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
2760 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2761 iv_dma, ivsize, 0);
2762 sec4_sg_index += 1;
2763 sg_to_sec4_sg_last(req->src, src_nents,
2764 edesc->sec4_sg + sec4_sg_index, 0);
2765 }
2766
Yuan Kang1acebad2011-07-15 11:21:42 +08002767 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
Yuan Kanga299c832012-06-22 19:48:46 -05002768 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad2011-07-15 11:21:42 +08002769 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05002770 sec4_sg_index += 1;
2771 sg_to_sec4_sg_last(req->dst, dst_nents,
2772 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad2011-07-15 11:21:42 +08002773 }
Ruchika Gupta1da2be32014-06-23 19:50:26 +05302774 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2775 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002776 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
 2777 dev_err(jrdev, "unable to map S/G table\n");
	kfree(edesc);	/* edesc was kmalloc'ed above; don't leak it on error */
 2778 return ERR_PTR(-ENOMEM);
 2779 }
Yuan Kang1acebad2011-07-15 11:21:42 +08002780
2781 return edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08002782}
2783
2784static int aead_givencrypt(struct aead_givcrypt_request *areq)
2785{
2786 struct aead_request *req = &areq->areq;
2787 struct aead_edesc *edesc;
2788 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002789 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2790 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad2011-07-15 11:21:42 +08002791 u32 contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002792 u32 *desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002793 int ret = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002794
Kim Phillips8e8ec592011-03-13 16:54:26 +08002795 /* allocate extended descriptor */
Yuan Kang1acebad2011-07-15 11:21:42 +08002796 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
2797 CAAM_CMD_SZ, &contig);
2798
Kim Phillips8e8ec592011-03-13 16:54:26 +08002799 if (IS_ERR(edesc))
2800 return PTR_ERR(edesc);
2801
Yuan Kang1acebad2011-07-15 11:21:42 +08002802#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002803 print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002804 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2805 req->cryptlen, 1);
2806#endif
2807
 2808 /* Create and submit job descriptor */
2809 init_aead_giv_job(ctx->sh_desc_givenc,
2810 ctx->sh_desc_givenc_dma, edesc, req, contig);
2811#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002812 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad2011-07-15 11:21:42 +08002813 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2814 desc_bytes(edesc->hw_desc), 1);
2815#endif
2816
Kim Phillips8e8ec592011-03-13 16:54:26 +08002817 desc = edesc->hw_desc;
Yuan Kang1acebad2011-07-15 11:21:42 +08002818 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2819 if (!ret) {
2820 ret = -EINPROGRESS;
2821 } else {
2822 aead_unmap(jrdev, edesc, req);
2823 kfree(edesc);
2824 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002825
Yuan Kang1acebad2011-07-15 11:21:42 +08002826 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002827}
2828
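/*
 * Usage sketch, not part of the driver: with the legacy AEAD API this
 * file targets, the "authenc(hmac(sha1),cbc(aes))" template registered
 * below could be exercised roughly as follows. key/keylen, assoc_sg,
 * src_sg, dst_sg, iv and the lengths are hypothetical, and error
 * handling is elided.
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_assoc(req, assoc_sg, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);	(-EINPROGRESS means the job ring
 *					 will complete it asynchronously)
 */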
Horia Geantaae4a8252014-03-14 17:46:52 +02002829static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
2830{
2831 return aead_encrypt(&areq->areq);
2832}
2833
Yuan Kangacdca312011-07-15 11:21:42 +08002834/*
 2835 * allocate and map the extended descriptor for ablkcipher
2836 */
2837static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2838 *req, int desc_bytes,
2839 bool *iv_contig_out)
2840{
2841 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2842 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2843 struct device *jrdev = ctx->jrdev;
2844 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2845 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2846 GFP_KERNEL : GFP_ATOMIC;
Yuan Kanga299c832012-06-22 19:48:46 -05002847 int src_nents, dst_nents = 0, sec4_sg_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08002848 struct ablkcipher_edesc *edesc;
2849 dma_addr_t iv_dma = 0;
2850 bool iv_contig = false;
2851 int sgc;
2852 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
Yuan Kang643b39b2012-06-22 19:48:49 -05002853 bool src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05002854 int sec4_sg_index;
Yuan Kangacdca312011-07-15 11:21:42 +08002855
Yuan Kang643b39b2012-06-22 19:48:49 -05002856 src_nents = sg_count(req->src, req->nbytes, &src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002857
Yuan Kang643b39b2012-06-22 19:48:49 -05002858 if (req->dst != req->src)
2859 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002860
2861 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05002862 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2863 DMA_BIDIRECTIONAL, src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002864 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05002865 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2866 DMA_TO_DEVICE, src_chained);
2867 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2868 DMA_FROM_DEVICE, dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08002869 }
2870
Horia Geantace572082014-07-11 15:34:49 +03002871 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
2872 if (dma_mapping_error(jrdev, iv_dma)) {
2873 dev_err(jrdev, "unable to map IV\n");
2874 return ERR_PTR(-ENOMEM);
2875 }
2876
Yuan Kangacdca312011-07-15 11:21:42 +08002877 /*
2878 * Check if iv can be contiguous with source and destination.
2879 * If so, include it. If not, create scatterlist.
2880 */
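	/*
	 * Illustration (single-segment source assumed): iv_contig ends up
	 * true only when the mapped IV immediately precedes the payload,
	 *
	 *   iv_dma          iv_dma + ivsize
	 *   [ IV .......... | plaintext ... ]
	 *
	 * so IV and data reach the CAAM as one contiguous run and no S/G
	 * entry is needed for the IV.
	 */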
Yuan Kangacdca312011-07-15 11:21:42 +08002881 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
2882 iv_contig = true;
2883 else
2884 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05002885 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2886 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002887
2888 /* allocate space for base edesc and hw desc commands, link tables */
2889 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05002890 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kangacdca312011-07-15 11:21:42 +08002891 if (!edesc) {
2892 dev_err(jrdev, "could not allocate extended descriptor\n");
2893 return ERR_PTR(-ENOMEM);
2894 }
2895
2896 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002897 edesc->src_chained = src_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08002898 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002899 edesc->dst_chained = dst_chained;
Yuan Kanga299c832012-06-22 19:48:46 -05002900 edesc->sec4_sg_bytes = sec4_sg_bytes;
2901 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2902 desc_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08002903
Yuan Kanga299c832012-06-22 19:48:46 -05002904 sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08002905 if (!iv_contig) {
Yuan Kanga299c832012-06-22 19:48:46 -05002906 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
2907 sg_to_sec4_sg_last(req->src, src_nents,
2908 edesc->sec4_sg + 1, 0);
2909 sec4_sg_index += 1 + src_nents;
Yuan Kangacdca312011-07-15 11:21:42 +08002910 }
2911
Yuan Kang643b39b2012-06-22 19:48:49 -05002912 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05002913 sg_to_sec4_sg_last(req->dst, dst_nents,
2914 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kangacdca312011-07-15 11:21:42 +08002915 }
2916
Yuan Kanga299c832012-06-22 19:48:46 -05002917 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2918 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002919 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
 2920 dev_err(jrdev, "unable to map S/G table\n");
	kfree(edesc);	/* edesc was kmalloc'ed above; don't leak it on error */
 2921 return ERR_PTR(-ENOMEM);
 2922 }
2923
Yuan Kangacdca312011-07-15 11:21:42 +08002924 edesc->iv_dma = iv_dma;
2925
2926#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002927 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
Yuan Kanga299c832012-06-22 19:48:46 -05002928 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2929 sec4_sg_bytes, 1);
Yuan Kangacdca312011-07-15 11:21:42 +08002930#endif
2931
2932 *iv_contig_out = iv_contig;
2933 return edesc;
2934}
2935
2936static int ablkcipher_encrypt(struct ablkcipher_request *req)
2937{
2938 struct ablkcipher_edesc *edesc;
2939 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2940 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2941 struct device *jrdev = ctx->jrdev;
2942 bool iv_contig;
2943 u32 *desc;
2944 int ret = 0;
2945
2946 /* allocate extended descriptor */
2947 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2948 CAAM_CMD_SZ, &iv_contig);
2949 if (IS_ERR(edesc))
2950 return PTR_ERR(edesc);
2951
 2952 /* Create and submit job descriptor */
2953 init_ablkcipher_job(ctx->sh_desc_enc,
2954 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
2955#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002956 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002957 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2958 desc_bytes(edesc->hw_desc), 1);
2959#endif
2960 desc = edesc->hw_desc;
2961 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2962
2963 if (!ret) {
2964 ret = -EINPROGRESS;
2965 } else {
2966 ablkcipher_unmap(jrdev, edesc, req);
2967 kfree(edesc);
2968 }
2969
2970 return ret;
2971}
2972
2973static int ablkcipher_decrypt(struct ablkcipher_request *req)
2974{
2975 struct ablkcipher_edesc *edesc;
2976 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2977 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2978 struct device *jrdev = ctx->jrdev;
2979 bool iv_contig;
2980 u32 *desc;
2981 int ret = 0;
2982
2983 /* allocate extended descriptor */
2984 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2985 CAAM_CMD_SZ, &iv_contig);
2986 if (IS_ERR(edesc))
2987 return PTR_ERR(edesc);
2988
 2989 /* Create and submit job descriptor */
2990 init_ablkcipher_job(ctx->sh_desc_dec,
2991 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
2992 desc = edesc->hw_desc;
2993#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002994 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002995 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2996 desc_bytes(edesc->hw_desc), 1);
2997#endif
2998
2999 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
3000 if (!ret) {
3001 ret = -EINPROGRESS;
3002 } else {
3003 ablkcipher_unmap(jrdev, edesc, req);
3004 kfree(edesc);
3005 }
3006
3007 return ret;
3008}
3009
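/*
 * Usage sketch, not part of the driver: a kernel user could drive the
 * "cbc(aes)" implementation registered below through the generic
 * ablkcipher API roughly as follows. key, iv, src_sg, dst_sg and
 * nbytes are hypothetical; error handling is elided.
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_MIN_KEY_SIZE);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);	(-EINPROGRESS is the normal
 *					 asynchronous completion path)
 */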
Yuan Kang885e9e22011-07-15 11:21:41 +08003010#define template_aead template_u.aead
Yuan Kangacdca312011-07-15 11:21:42 +08003011#define template_ablkcipher template_u.ablkcipher
Kim Phillips8e8ec592011-03-13 16:54:26 +08003012struct caam_alg_template {
3013 char name[CRYPTO_MAX_ALG_NAME];
3014 char driver_name[CRYPTO_MAX_ALG_NAME];
3015 unsigned int blocksize;
Yuan Kang885e9e22011-07-15 11:21:41 +08003016 u32 type;
3017 union {
3018 struct ablkcipher_alg ablkcipher;
3019 struct aead_alg aead;
3020 struct blkcipher_alg blkcipher;
3021 struct cipher_alg cipher;
3022 struct compress_alg compress;
3023 struct rng_alg rng;
3024 } template_u;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003025 u32 class1_alg_type;
3026 u32 class2_alg_type;
3027 u32 alg_op;
3028};
3029
3030static struct caam_alg_template driver_algs[] = {
Horia Geanta246bbed2013-03-20 16:31:58 +02003031 /* single-pass ipsec_esp descriptor */
Kim Phillips8e8ec592011-03-13 16:54:26 +08003032 {
Horia Geantaae4a8252014-03-14 17:46:52 +02003033 .name = "authenc(hmac(md5),ecb(cipher_null))",
3034 .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
3035 .blocksize = NULL_BLOCK_SIZE,
3036 .type = CRYPTO_ALG_TYPE_AEAD,
3037 .template_aead = {
3038 .setkey = aead_setkey,
3039 .setauthsize = aead_setauthsize,
3040 .encrypt = aead_encrypt,
3041 .decrypt = aead_decrypt,
3042 .givencrypt = aead_null_givencrypt,
3043 .geniv = "<built-in>",
3044 .ivsize = NULL_IV_SIZE,
3045 .maxauthsize = MD5_DIGEST_SIZE,
3046 },
3047 .class1_alg_type = 0,
3048 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3049 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3050 },
3051 {
3052 .name = "authenc(hmac(sha1),ecb(cipher_null))",
3053 .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
3054 .blocksize = NULL_BLOCK_SIZE,
3055 .type = CRYPTO_ALG_TYPE_AEAD,
3056 .template_aead = {
3057 .setkey = aead_setkey,
3058 .setauthsize = aead_setauthsize,
3059 .encrypt = aead_encrypt,
3060 .decrypt = aead_decrypt,
3061 .givencrypt = aead_null_givencrypt,
3062 .geniv = "<built-in>",
3063 .ivsize = NULL_IV_SIZE,
3064 .maxauthsize = SHA1_DIGEST_SIZE,
3065 },
3066 .class1_alg_type = 0,
3067 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3068 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3069 },
3070 {
3071 .name = "authenc(hmac(sha224),ecb(cipher_null))",
3072 .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
3073 .blocksize = NULL_BLOCK_SIZE,
3074 .type = CRYPTO_ALG_TYPE_AEAD,
3075 .template_aead = {
3076 .setkey = aead_setkey,
3077 .setauthsize = aead_setauthsize,
3078 .encrypt = aead_encrypt,
3079 .decrypt = aead_decrypt,
3080 .givencrypt = aead_null_givencrypt,
3081 .geniv = "<built-in>",
3082 .ivsize = NULL_IV_SIZE,
3083 .maxauthsize = SHA224_DIGEST_SIZE,
3084 },
3085 .class1_alg_type = 0,
3086 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3087 OP_ALG_AAI_HMAC_PRECOMP,
3088 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3089 },
3090 {
3091 .name = "authenc(hmac(sha256),ecb(cipher_null))",
3092 .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
3093 .blocksize = NULL_BLOCK_SIZE,
3094 .type = CRYPTO_ALG_TYPE_AEAD,
3095 .template_aead = {
3096 .setkey = aead_setkey,
3097 .setauthsize = aead_setauthsize,
3098 .encrypt = aead_encrypt,
3099 .decrypt = aead_decrypt,
3100 .givencrypt = aead_null_givencrypt,
3101 .geniv = "<built-in>",
3102 .ivsize = NULL_IV_SIZE,
3103 .maxauthsize = SHA256_DIGEST_SIZE,
3104 },
3105 .class1_alg_type = 0,
3106 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3107 OP_ALG_AAI_HMAC_PRECOMP,
3108 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3109 },
3110 {
3111 .name = "authenc(hmac(sha384),ecb(cipher_null))",
3112 .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
3113 .blocksize = NULL_BLOCK_SIZE,
3114 .type = CRYPTO_ALG_TYPE_AEAD,
3115 .template_aead = {
3116 .setkey = aead_setkey,
3117 .setauthsize = aead_setauthsize,
3118 .encrypt = aead_encrypt,
3119 .decrypt = aead_decrypt,
3120 .givencrypt = aead_null_givencrypt,
3121 .geniv = "<built-in>",
3122 .ivsize = NULL_IV_SIZE,
3123 .maxauthsize = SHA384_DIGEST_SIZE,
3124 },
3125 .class1_alg_type = 0,
3126 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3127 OP_ALG_AAI_HMAC_PRECOMP,
3128 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3129 },
3130 {
3131 .name = "authenc(hmac(sha512),ecb(cipher_null))",
3132 .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
3133 .blocksize = NULL_BLOCK_SIZE,
3134 .type = CRYPTO_ALG_TYPE_AEAD,
3135 .template_aead = {
3136 .setkey = aead_setkey,
3137 .setauthsize = aead_setauthsize,
3138 .encrypt = aead_encrypt,
3139 .decrypt = aead_decrypt,
3140 .givencrypt = aead_null_givencrypt,
3141 .geniv = "<built-in>",
3142 .ivsize = NULL_IV_SIZE,
3143 .maxauthsize = SHA512_DIGEST_SIZE,
3144 },
3145 .class1_alg_type = 0,
3146 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3147 OP_ALG_AAI_HMAC_PRECOMP,
3148 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3149 },
3150 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003151 .name = "authenc(hmac(md5),cbc(aes))",
3152 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
3153 .blocksize = AES_BLOCK_SIZE,
3154 .type = CRYPTO_ALG_TYPE_AEAD,
3155 .template_aead = {
3156 .setkey = aead_setkey,
3157 .setauthsize = aead_setauthsize,
3158 .encrypt = aead_encrypt,
3159 .decrypt = aead_decrypt,
3160 .givencrypt = aead_givencrypt,
3161 .geniv = "<built-in>",
3162 .ivsize = AES_BLOCK_SIZE,
3163 .maxauthsize = MD5_DIGEST_SIZE,
3164 },
3165 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3166 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3167 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3168 },
3169 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003170 .name = "authenc(hmac(sha1),cbc(aes))",
3171 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
3172 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003173 .type = CRYPTO_ALG_TYPE_AEAD,
3174 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003175 .setkey = aead_setkey,
3176 .setauthsize = aead_setauthsize,
3177 .encrypt = aead_encrypt,
3178 .decrypt = aead_decrypt,
3179 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003180 .geniv = "<built-in>",
3181 .ivsize = AES_BLOCK_SIZE,
3182 .maxauthsize = SHA1_DIGEST_SIZE,
3183 },
3184 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3185 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3186 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3187 },
3188 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003189 .name = "authenc(hmac(sha224),cbc(aes))",
3190 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
3191 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303192 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003193 .template_aead = {
3194 .setkey = aead_setkey,
3195 .setauthsize = aead_setauthsize,
3196 .encrypt = aead_encrypt,
3197 .decrypt = aead_decrypt,
3198 .givencrypt = aead_givencrypt,
3199 .geniv = "<built-in>",
3200 .ivsize = AES_BLOCK_SIZE,
3201 .maxauthsize = SHA224_DIGEST_SIZE,
3202 },
3203 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3204 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3205 OP_ALG_AAI_HMAC_PRECOMP,
3206 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3207 },
3208 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003209 .name = "authenc(hmac(sha256),cbc(aes))",
3210 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
3211 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003212 .type = CRYPTO_ALG_TYPE_AEAD,
3213 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003214 .setkey = aead_setkey,
3215 .setauthsize = aead_setauthsize,
3216 .encrypt = aead_encrypt,
3217 .decrypt = aead_decrypt,
3218 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003219 .geniv = "<built-in>",
3220 .ivsize = AES_BLOCK_SIZE,
3221 .maxauthsize = SHA256_DIGEST_SIZE,
3222 },
3223 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3224 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3225 OP_ALG_AAI_HMAC_PRECOMP,
3226 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3227 },
3228 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003229 .name = "authenc(hmac(sha384),cbc(aes))",
3230 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
3231 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303232 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003233 .template_aead = {
3234 .setkey = aead_setkey,
3235 .setauthsize = aead_setauthsize,
3236 .encrypt = aead_encrypt,
3237 .decrypt = aead_decrypt,
3238 .givencrypt = aead_givencrypt,
3239 .geniv = "<built-in>",
3240 .ivsize = AES_BLOCK_SIZE,
3241 .maxauthsize = SHA384_DIGEST_SIZE,
3242 },
3243 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3244 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3245 OP_ALG_AAI_HMAC_PRECOMP,
3246 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3247 },
3249 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003250 .name = "authenc(hmac(sha512),cbc(aes))",
3251 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
3252 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003253 .type = CRYPTO_ALG_TYPE_AEAD,
3254 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003255 .setkey = aead_setkey,
3256 .setauthsize = aead_setauthsize,
3257 .encrypt = aead_encrypt,
3258 .decrypt = aead_decrypt,
3259 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003260 .geniv = "<built-in>",
3261 .ivsize = AES_BLOCK_SIZE,
3262 .maxauthsize = SHA512_DIGEST_SIZE,
3263 },
3264 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3265 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3266 OP_ALG_AAI_HMAC_PRECOMP,
3267 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3268 },
3269 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003270 .name = "authenc(hmac(md5),cbc(des3_ede))",
3271 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
3272 .blocksize = DES3_EDE_BLOCK_SIZE,
3273 .type = CRYPTO_ALG_TYPE_AEAD,
3274 .template_aead = {
3275 .setkey = aead_setkey,
3276 .setauthsize = aead_setauthsize,
3277 .encrypt = aead_encrypt,
3278 .decrypt = aead_decrypt,
3279 .givencrypt = aead_givencrypt,
3280 .geniv = "<built-in>",
3281 .ivsize = DES3_EDE_BLOCK_SIZE,
3282 .maxauthsize = MD5_DIGEST_SIZE,
3283 },
3284 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3285 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3286 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3287 },
3288 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003289 .name = "authenc(hmac(sha1),cbc(des3_ede))",
3290 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
3291 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003292 .type = CRYPTO_ALG_TYPE_AEAD,
3293 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003294 .setkey = aead_setkey,
3295 .setauthsize = aead_setauthsize,
3296 .encrypt = aead_encrypt,
3297 .decrypt = aead_decrypt,
3298 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003299 .geniv = "<built-in>",
3300 .ivsize = DES3_EDE_BLOCK_SIZE,
3301 .maxauthsize = SHA1_DIGEST_SIZE,
3302 },
3303 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3304 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3305 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3306 },
3307 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003308 .name = "authenc(hmac(sha224),cbc(des3_ede))",
3309 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
3310 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303311 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003312 .template_aead = {
3313 .setkey = aead_setkey,
3314 .setauthsize = aead_setauthsize,
3315 .encrypt = aead_encrypt,
3316 .decrypt = aead_decrypt,
3317 .givencrypt = aead_givencrypt,
3318 .geniv = "<built-in>",
3319 .ivsize = DES3_EDE_BLOCK_SIZE,
3320 .maxauthsize = SHA224_DIGEST_SIZE,
3321 },
3322 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3323 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3324 OP_ALG_AAI_HMAC_PRECOMP,
3325 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3326 },
3327 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003328 .name = "authenc(hmac(sha256),cbc(des3_ede))",
3329 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
3330 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003331 .type = CRYPTO_ALG_TYPE_AEAD,
3332 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003333 .setkey = aead_setkey,
3334 .setauthsize = aead_setauthsize,
3335 .encrypt = aead_encrypt,
3336 .decrypt = aead_decrypt,
3337 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003338 .geniv = "<built-in>",
3339 .ivsize = DES3_EDE_BLOCK_SIZE,
3340 .maxauthsize = SHA256_DIGEST_SIZE,
3341 },
3342 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3343 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3344 OP_ALG_AAI_HMAC_PRECOMP,
3345 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3346 },
3347 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003348 .name = "authenc(hmac(sha384),cbc(des3_ede))",
3349 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
3350 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303351 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003352 .template_aead = {
3353 .setkey = aead_setkey,
3354 .setauthsize = aead_setauthsize,
3355 .encrypt = aead_encrypt,
3356 .decrypt = aead_decrypt,
3357 .givencrypt = aead_givencrypt,
3358 .geniv = "<built-in>",
3359 .ivsize = DES3_EDE_BLOCK_SIZE,
3360 .maxauthsize = SHA384_DIGEST_SIZE,
3361 },
3362 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3363 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3364 OP_ALG_AAI_HMAC_PRECOMP,
3365 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3366 },
3367 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003368 .name = "authenc(hmac(sha512),cbc(des3_ede))",
3369 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
3370 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003371 .type = CRYPTO_ALG_TYPE_AEAD,
3372 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003373 .setkey = aead_setkey,
3374 .setauthsize = aead_setauthsize,
3375 .encrypt = aead_encrypt,
3376 .decrypt = aead_decrypt,
3377 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003378 .geniv = "<built-in>",
3379 .ivsize = DES3_EDE_BLOCK_SIZE,
3380 .maxauthsize = SHA512_DIGEST_SIZE,
3381 },
3382 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3383 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3384 OP_ALG_AAI_HMAC_PRECOMP,
3385 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3386 },
3387 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003388 .name = "authenc(hmac(md5),cbc(des))",
3389 .driver_name = "authenc-hmac-md5-cbc-des-caam",
3390 .blocksize = DES_BLOCK_SIZE,
3391 .type = CRYPTO_ALG_TYPE_AEAD,
3392 .template_aead = {
3393 .setkey = aead_setkey,
3394 .setauthsize = aead_setauthsize,
3395 .encrypt = aead_encrypt,
3396 .decrypt = aead_decrypt,
3397 .givencrypt = aead_givencrypt,
3398 .geniv = "<built-in>",
3399 .ivsize = DES_BLOCK_SIZE,
3400 .maxauthsize = MD5_DIGEST_SIZE,
3401 },
3402 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3403 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3404 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3405 },
3406 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003407 .name = "authenc(hmac(sha1),cbc(des))",
3408 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
3409 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003410 .type = CRYPTO_ALG_TYPE_AEAD,
3411 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003412 .setkey = aead_setkey,
3413 .setauthsize = aead_setauthsize,
3414 .encrypt = aead_encrypt,
3415 .decrypt = aead_decrypt,
3416 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003417 .geniv = "<built-in>",
3418 .ivsize = DES_BLOCK_SIZE,
3419 .maxauthsize = SHA1_DIGEST_SIZE,
3420 },
3421 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3422 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3423 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3424 },
3425 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003426 .name = "authenc(hmac(sha224),cbc(des))",
3427 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
3428 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303429 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003430 .template_aead = {
3431 .setkey = aead_setkey,
3432 .setauthsize = aead_setauthsize,
3433 .encrypt = aead_encrypt,
3434 .decrypt = aead_decrypt,
3435 .givencrypt = aead_givencrypt,
3436 .geniv = "<built-in>",
3437 .ivsize = DES_BLOCK_SIZE,
3438 .maxauthsize = SHA224_DIGEST_SIZE,
3439 },
3440 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3441 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3442 OP_ALG_AAI_HMAC_PRECOMP,
3443 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3444 },
3445 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003446 .name = "authenc(hmac(sha256),cbc(des))",
3447 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
3448 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003449 .type = CRYPTO_ALG_TYPE_AEAD,
3450 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003451 .setkey = aead_setkey,
3452 .setauthsize = aead_setauthsize,
3453 .encrypt = aead_encrypt,
3454 .decrypt = aead_decrypt,
3455 .givencrypt = aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003456 .geniv = "<built-in>",
3457 .ivsize = DES_BLOCK_SIZE,
3458 .maxauthsize = SHA256_DIGEST_SIZE,
3459 },
3460 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3461 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3462 OP_ALG_AAI_HMAC_PRECOMP,
3463 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3464 },
Kim Phillips4427b1b2011-05-14 22:08:17 -05003465 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003466 .name = "authenc(hmac(sha384),cbc(des))",
3467 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
3468 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303469 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003470 .template_aead = {
3471 .setkey = aead_setkey,
3472 .setauthsize = aead_setauthsize,
3473 .encrypt = aead_encrypt,
3474 .decrypt = aead_decrypt,
3475 .givencrypt = aead_givencrypt,
3476 .geniv = "<built-in>",
3477 .ivsize = DES_BLOCK_SIZE,
3478 .maxauthsize = SHA384_DIGEST_SIZE,
3479 },
3480 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3481 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3482 OP_ALG_AAI_HMAC_PRECOMP,
3483 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3484 },
3485 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003486 .name = "authenc(hmac(sha512),cbc(des))",
3487 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
3488 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003489 .type = CRYPTO_ALG_TYPE_AEAD,
3490 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003491 .setkey = aead_setkey,
3492 .setauthsize = aead_setauthsize,
3493 .encrypt = aead_encrypt,
3494 .decrypt = aead_decrypt,
3495 .givencrypt = aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003496 .geniv = "<built-in>",
3497 .ivsize = DES_BLOCK_SIZE,
3498 .maxauthsize = SHA512_DIGEST_SIZE,
3499 },
3500 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3501 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3502 OP_ALG_AAI_HMAC_PRECOMP,
3503 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3504 },
Tudor Ambarusbac68f22014-10-23 16:14:03 +03003505 {
3506 .name = "rfc4106(gcm(aes))",
3507 .driver_name = "rfc4106-gcm-aes-caam",
3508 .blocksize = 1,
3509 .type = CRYPTO_ALG_TYPE_AEAD,
3510 .template_aead = {
3511 .setkey = rfc4106_setkey,
3512 .setauthsize = rfc4106_setauthsize,
3513 .encrypt = aead_encrypt,
3514 .decrypt = aead_decrypt,
3515 .givencrypt = aead_givencrypt,
3516 .geniv = "<built-in>",
3517 .ivsize = 8,
3518 .maxauthsize = AES_BLOCK_SIZE,
3519 },
3520 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3521 },
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02003522 {
3523 .name = "rfc4543(gcm(aes))",
3524 .driver_name = "rfc4543-gcm-aes-caam",
3525 .blocksize = 1,
3526 .type = CRYPTO_ALG_TYPE_AEAD,
3527 .template_aead = {
3528 .setkey = rfc4543_setkey,
3529 .setauthsize = rfc4543_setauthsize,
3530 .encrypt = aead_encrypt,
3531 .decrypt = aead_decrypt,
3532 .givencrypt = aead_givencrypt,
3533 .geniv = "<built-in>",
3534 .ivsize = 8,
3535 .maxauthsize = AES_BLOCK_SIZE,
3536 },
3537 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3538 },
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03003539 /* Galois Counter Mode */
3540 {
3541 .name = "gcm(aes)",
3542 .driver_name = "gcm-aes-caam",
3543 .blocksize = 1,
3544 .type = CRYPTO_ALG_TYPE_AEAD,
3545 .template_aead = {
3546 .setkey = gcm_setkey,
3547 .setauthsize = gcm_setauthsize,
3548 .encrypt = aead_encrypt,
3549 .decrypt = aead_decrypt,
3550 .givencrypt = NULL,
3551 .geniv = "<built-in>",
3552 .ivsize = 12,
3553 .maxauthsize = AES_BLOCK_SIZE,
3554 },
3555 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3556 },
Yuan Kangacdca312011-07-15 11:21:42 +08003557 /* ablkcipher descriptor */
3558 {
3559 .name = "cbc(aes)",
3560 .driver_name = "cbc-aes-caam",
3561 .blocksize = AES_BLOCK_SIZE,
3562 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3563 .template_ablkcipher = {
3564 .setkey = ablkcipher_setkey,
3565 .encrypt = ablkcipher_encrypt,
3566 .decrypt = ablkcipher_decrypt,
3567 .geniv = "eseqiv",
3568 .min_keysize = AES_MIN_KEY_SIZE,
3569 .max_keysize = AES_MAX_KEY_SIZE,
3570 .ivsize = AES_BLOCK_SIZE,
3571 },
3572 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3573 },
3574 {
3575 .name = "cbc(des3_ede)",
3576 .driver_name = "cbc-3des-caam",
3577 .blocksize = DES3_EDE_BLOCK_SIZE,
3578 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3579 .template_ablkcipher = {
3580 .setkey = ablkcipher_setkey,
3581 .encrypt = ablkcipher_encrypt,
3582 .decrypt = ablkcipher_decrypt,
3583 .geniv = "eseqiv",
3584 .min_keysize = DES3_EDE_KEY_SIZE,
3585 .max_keysize = DES3_EDE_KEY_SIZE,
3586 .ivsize = DES3_EDE_BLOCK_SIZE,
3587 },
3588 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3589 },
3590 {
3591 .name = "cbc(des)",
3592 .driver_name = "cbc-des-caam",
3593 .blocksize = DES_BLOCK_SIZE,
3594 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3595 .template_ablkcipher = {
3596 .setkey = ablkcipher_setkey,
3597 .encrypt = ablkcipher_encrypt,
3598 .decrypt = ablkcipher_decrypt,
3599 .geniv = "eseqiv",
3600 .min_keysize = DES_KEY_SIZE,
3601 .max_keysize = DES_KEY_SIZE,
3602 .ivsize = DES_BLOCK_SIZE,
3603 },
3604 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02003605 },
3606 {
3607 .name = "ctr(aes)",
3608 .driver_name = "ctr-aes-caam",
3609 .blocksize = 1,
3610 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3611 .template_ablkcipher = {
3612 .setkey = ablkcipher_setkey,
3613 .encrypt = ablkcipher_encrypt,
3614 .decrypt = ablkcipher_decrypt,
3615 .geniv = "chainiv",
3616 .min_keysize = AES_MIN_KEY_SIZE,
3617 .max_keysize = AES_MAX_KEY_SIZE,
3618 .ivsize = AES_BLOCK_SIZE,
3619 },
3620 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02003621 },
3622 {
3623 .name = "rfc3686(ctr(aes))",
3624 .driver_name = "rfc3686-ctr-aes-caam",
3625 .blocksize = 1,
3626 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3627 .template_ablkcipher = {
3628 .setkey = ablkcipher_setkey,
3629 .encrypt = ablkcipher_encrypt,
3630 .decrypt = ablkcipher_decrypt,
3631 .geniv = "seqiv",
3632 .min_keysize = AES_MIN_KEY_SIZE +
3633 CTR_RFC3686_NONCE_SIZE,
3634 .max_keysize = AES_MAX_KEY_SIZE +
3635 CTR_RFC3686_NONCE_SIZE,
3636 .ivsize = CTR_RFC3686_IV_SIZE,
3637 },
3638 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
Yuan Kangacdca312011-07-15 11:21:42 +08003639 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003640};
3641
3642struct caam_crypto_alg {
3643 struct list_head entry;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003644 int class1_alg_type;
3645 int class2_alg_type;
3646 int alg_op;
3647 struct crypto_alg crypto_alg;
3648};
3649
3650static int caam_cra_init(struct crypto_tfm *tfm)
3651{
3652 struct crypto_alg *alg = tfm->__crt_alg;
3653 struct caam_crypto_alg *caam_alg =
3654 container_of(alg, struct caam_crypto_alg, crypto_alg);
3655 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003656
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303657 ctx->jrdev = caam_jr_alloc();
3658 if (IS_ERR(ctx->jrdev)) {
3659 pr_err("Job Ring Device allocation for transform failed\n");
3660 return PTR_ERR(ctx->jrdev);
3661 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003662
3663 /* copy descriptor header template value */
3664 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
3665 ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
3666 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
3667
3668 return 0;
3669}
3670
3671static void caam_cra_exit(struct crypto_tfm *tfm)
3672{
3673 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
3674
Yuan Kang1acebad2011-07-15 11:21:42 +08003675 if (ctx->sh_desc_enc_dma &&
3676 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
3677 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
3678 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
3679 if (ctx->sh_desc_dec_dma &&
3680 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
3681 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
3682 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
3683 if (ctx->sh_desc_givenc_dma &&
3684 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
3685 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
3686 desc_bytes(ctx->sh_desc_givenc),
Kim Phillips4427b1b2011-05-14 22:08:17 -05003687 DMA_TO_DEVICE);
Horia Geantaec31eed2014-03-14 17:48:30 +02003688 if (ctx->key_dma &&
3689 !dma_mapping_error(ctx->jrdev, ctx->key_dma))
3690 dma_unmap_single(ctx->jrdev, ctx->key_dma,
3691 ctx->enckeylen + ctx->split_key_pad_len,
3692 DMA_TO_DEVICE);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303693
3694 caam_jr_free(ctx->jrdev);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003695}
3696
3697static void __exit caam_algapi_exit(void)
3698{
Kim Phillips8e8ec592011-03-13 16:54:26 +08003700 struct caam_crypto_alg *t_alg, *n;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003701
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303702 if (!alg_list.next)
Kim Phillips8e8ec592011-03-13 16:54:26 +08003703 return;
3704
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303705 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003706 crypto_unregister_alg(&t_alg->crypto_alg);
3707 list_del(&t_alg->entry);
3708 kfree(t_alg);
3709 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003710}
3711
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303712static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
Kim Phillips8e8ec592011-03-13 16:54:26 +08003713 *template)
3714{
3715 struct caam_crypto_alg *t_alg;
3716 struct crypto_alg *alg;
3717
3718 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
3719 if (!t_alg) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303720 pr_err("failed to allocate t_alg\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08003721 return ERR_PTR(-ENOMEM);
3722 }
3723
3724 alg = &t_alg->crypto_alg;
3725
3726 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
3727 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
3728 template->driver_name);
3729 alg->cra_module = THIS_MODULE;
3730 alg->cra_init = caam_cra_init;
3731 alg->cra_exit = caam_cra_exit;
3732 alg->cra_priority = CAAM_CRA_PRIORITY;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003733 alg->cra_blocksize = template->blocksize;
3734 alg->cra_alignmask = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003735 alg->cra_ctxsize = sizeof(struct caam_ctx);
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01003736 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
3737 template->type;
Yuan Kang885e9e22011-07-15 11:21:41 +08003738 switch (template->type) {
Yuan Kangacdca312011-07-15 11:21:42 +08003739 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3740 alg->cra_type = &crypto_ablkcipher_type;
3741 alg->cra_ablkcipher = template->template_ablkcipher;
3742 break;
Yuan Kang885e9e22011-07-15 11:21:41 +08003743 case CRYPTO_ALG_TYPE_AEAD:
3744 alg->cra_type = &crypto_aead_type;
3745 alg->cra_aead = template->template_aead;
3746 break;
3747 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003748
3749 t_alg->class1_alg_type = template->class1_alg_type;
3750 t_alg->class2_alg_type = template->class2_alg_type;
3751 t_alg->alg_op = template->alg_op;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003752
3753 return t_alg;
3754}
3755
3756static int __init caam_algapi_init(void)
3757{
Ruchika Gupta35af6402014-07-07 10:42:12 +05303758 struct device_node *dev_node;
3759 struct platform_device *pdev;
3760 struct device *ctrldev;
3761 void *priv;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003762 int i = 0, err = 0;
3763
Ruchika Gupta35af6402014-07-07 10:42:12 +05303764 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
3765 if (!dev_node) {
3766 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
3767 if (!dev_node)
3768 return -ENODEV;
3769 }
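	/*
	 * For reference, a sketch of the controller node matched above
	 * (the unit address and other properties are platform-specific
	 * and hypothetical here):
	 *
	 *	crypto@300000 {
	 *		compatible = "fsl,sec-v4.0";
	 *		...
	 *	};
	 */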
3770
3771 pdev = of_find_device_by_node(dev_node);
3772 if (!pdev) {
3773 of_node_put(dev_node);
3774 return -ENODEV;
3775 }
3776
3777 ctrldev = &pdev->dev;
3778 priv = dev_get_drvdata(ctrldev);
3779 of_node_put(dev_node);
3780
3781 /*
3782 * If priv is NULL, it's probably because the caam driver wasn't
3783 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
3784 */
3785 if (!priv)
3786 return -ENODEV;
 3787
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303789 INIT_LIST_HEAD(&alg_list);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003790
3791 /* register crypto algorithms the device supports */
3792 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3793 /* TODO: check if h/w supports alg */
3794 struct caam_crypto_alg *t_alg;
3795
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303796 t_alg = caam_alg_alloc(&driver_algs[i]);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003797 if (IS_ERR(t_alg)) {
3798 err = PTR_ERR(t_alg);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303799 pr_warn("%s alg allocation failed\n",
3800 driver_algs[i].driver_name);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003801 continue;
3802 }
3803
3804 err = crypto_register_alg(&t_alg->crypto_alg);
3805 if (err) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303806 pr_warn("%s alg registration failed\n",
Kim Phillips8e8ec592011-03-13 16:54:26 +08003807 t_alg->crypto_alg.cra_driver_name);
3808 kfree(t_alg);
Horia Geanta246bbed2013-03-20 16:31:58 +02003809 } else {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303810 list_add_tail(&t_alg->entry, &alg_list);
 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003811 }
Ruchika Guptacfc6f112013-10-25 12:01:03 +05303812 if (!list_empty(&alg_list))
3813 pr_info("caam algorithms registered in /proc/crypto\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08003814
3815 return err;
3816}
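/*
 * After a successful init, each registered implementation is listed in
 * /proc/crypto; a representative entry would look roughly like this
 * (exact fields vary by kernel version and self-test status):
 *
 *	name     : cbc(aes)
 *	driver   : cbc-aes-caam
 *	priority : 3000
 *	type     : ablkcipher
 */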
3817
3818module_init(caam_algapi_init);
3819module_exit(caam_algapi_exit);
3820
3821MODULE_LICENSE("GPL");
3822MODULE_DESCRIPTION("FSL CAAM support for crypto API");
3823MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");