Kim Phillips8e8ec592011-03-13 16:54:26 +08001/*
2 * caam - Freescale FSL CAAM support for crypto API
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Based on talitos crypto API driver.
7 *
8 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
9 *
10 * --------------- ---------------
11 * | JobDesc #1 |-------------------->| ShareDesc |
12 * | *(packet 1) | | (PDB) |
13 * --------------- |------------->| (hashKey) |
14 * . | | (cipherKey) |
15 * . | |-------->| (operation) |
16 * --------------- | | ---------------
17 * | JobDesc #2 |------| |
18 * | *(packet 2) | |
19 * --------------- |
20 * . |
21 * . |
22 * --------------- |
23 * | JobDesc #3 |------------
24 * | *(packet 3) |
25 * ---------------
26 *
27 * The SharedDesc never changes for a connection unless rekeyed, but
28 * each packet will likely be in a different place. So all we need
29 * to know to process the packet is where the input is, where the
30 * output goes, and what context we want to process with. Context is
31 * in the SharedDesc, packet references in the JobDesc.
32 *
33 * So, a job desc looks like:
34 *
35 * ---------------------
36 * | Header |
37 * | ShareDesc Pointer |
38 * | SEQ_OUT_PTR |
39 * | (output buffer) |
Yuan Kang6ec47332012-06-22 19:48:43 -050040 * | (output length) |
Kim Phillips8e8ec592011-03-13 16:54:26 +080041 * | SEQ_IN_PTR |
42 * | (input buffer) |
Yuan Kang6ec47332012-06-22 19:48:43 -050043 * | (input length) |
Kim Phillips8e8ec592011-03-13 16:54:26 +080044 * ---------------------
45 */
46
47#include "compat.h"
48
49#include "regs.h"
50#include "intern.h"
51#include "desc_constr.h"
52#include "jr.h"
53#include "error.h"
Yuan Kanga299c832012-06-22 19:48:46 -050054#include "sg_sw_sec4.h"
Yuan Kang4c1ec1f2012-06-22 19:48:45 -050055#include "key_gen.h"
Kim Phillips8e8ec592011-03-13 16:54:26 +080056
57/*
58 * crypto alg
59 */
60#define CAAM_CRA_PRIORITY 3000
61/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
62#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
Catalin Vasiledaebc462014-10-31 12:45:37 +020063 CTR_RFC3686_NONCE_SIZE + \
Kim Phillips8e8ec592011-03-13 16:54:26 +080064 SHA512_DIGEST_SIZE * 2)
65/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
66#define CAAM_MAX_IV_LENGTH 16
67
Herbert Xuf2147b82015-06-16 13:54:23 +080068#define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
69#define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
70 CAAM_CMD_SZ * 4)
71
Kim Phillips4427b1b2011-05-14 22:08:17 -050072/* length of descriptors text */
Yuan Kang1acebad32011-07-15 11:21:42 +080073#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
Horia Geanta4464a7d2014-03-14 17:46:49 +020074#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
75#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
Yuan Kang1acebad32011-07-15 11:21:42 +080076#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
77
Catalin Vasiledaebc462014-10-31 12:45:37 +020078/* Note: Nonce is counted in enckeylen */
79#define DESC_AEAD_CTR_RFC3686_LEN (6 * CAAM_CMD_SZ)
80
Horia Geantaae4a8252014-03-14 17:46:52 +020081#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
82#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
83#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
84
Tudor Ambarus3ef8d942014-10-23 16:11:23 +030085#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
Herbert Xuf2147b82015-06-16 13:54:23 +080086#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
87#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
Tudor Ambarus3ef8d942014-10-23 16:11:23 +030088
Tudor Ambarusbac68f22014-10-23 16:14:03 +030089#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
Horia Geantă4aad0cc2015-07-30 22:11:18 +030090#define DESC_RFC4106_ENC_LEN	(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
91#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
Tudor Ambarusbac68f22014-10-23 16:14:03 +030092
Tudor Ambarus5d0429a2014-10-30 18:55:07 +020093#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
Herbert Xuf2147b82015-06-16 13:54:23 +080094#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
95#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
Tudor Ambarus5d0429a2014-10-30 18:55:07 +020096
Yuan Kangacdca312011-07-15 11:21:42 +080097#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
98#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
99 20 * CAAM_CMD_SZ)
100#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
101 15 * CAAM_CMD_SZ)
102
Herbert Xu87e51b02015-06-18 14:25:55 +0800103#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
104#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
Kim Phillips4427b1b2011-05-14 22:08:17 -0500105
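/*
 * Editor's note (illustrative sketch, not part of the original driver): the
 * keys_fit_inline checks below all perform the same budget test -- the
 * descriptor text plus any immediate key material must fit, together with
 * the job descriptor I/O commands, inside the 64-word descriptor buffer.
 * A hypothetical helper capturing the common case (the GCM variants
 * substitute GCM_DESC_JOB_IO_LEN) might read:
 *
 *	static inline bool sh_desc_keys_fit_inline(int desc_len, int key_len)
 *	{
 *		return desc_len + DESC_JOB_IO_LEN + key_len <=
 *		       CAAM_DESC_BYTES_MAX;
 *	}
 */
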
Kim Phillips8e8ec592011-03-13 16:54:26 +0800106#ifdef DEBUG
107/* for print_hex_dumps with line references */
Kim Phillips8e8ec592011-03-13 16:54:26 +0800108#define debug(format, arg...) printk(format, arg)
109#else
110#define debug(format, arg...)
111#endif
Ruchika Guptacfc6f112013-10-25 12:01:03 +0530112static struct list_head alg_list;
Kim Phillips8e8ec592011-03-13 16:54:26 +0800113
Yuan Kang1acebad32011-07-15 11:21:42 +0800114/* Set DK bit in class 1 operation if shared */
115static inline void append_dec_op1(u32 *desc, u32 type)
116{
117 u32 *jump_cmd, *uncond_jump_cmd;
118
Horia Geantaa60384d2014-07-11 15:46:58 +0300119 /* DK bit is valid only for AES */
120 if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
121 append_operation(desc, type | OP_ALG_AS_INITFINAL |
122 OP_ALG_DECRYPT);
123 return;
124 }
125
Yuan Kang1acebad32011-07-15 11:21:42 +0800126 jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
127 append_operation(desc, type | OP_ALG_AS_INITFINAL |
128 OP_ALG_DECRYPT);
129 uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
130 set_jump_tgt_here(desc, jump_cmd);
131 append_operation(desc, type | OP_ALG_AS_INITFINAL |
132 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
133 set_jump_tgt_here(desc, uncond_jump_cmd);
134}
135
136/*
Yuan Kang1acebad32011-07-15 11:21:42 +0800137 * For aead functions, read payload and write payload,
138 * both of which are specified in req->src and req->dst
139 */
140static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
141{
Horia Geantaae4a8252014-03-14 17:46:52 +0200142 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
Yuan Kang1acebad32011-07-15 11:21:42 +0800143 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
144 KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
Yuan Kang1acebad32011-07-15 11:21:42 +0800145}
146
147/*
148 * For aead encrypt and decrypt, read iv for both classes
149 */
Catalin Vasiledaebc462014-10-31 12:45:37 +0200150static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
Yuan Kang1acebad32011-07-15 11:21:42 +0800151{
Catalin Vasiledaebc462014-10-31 12:45:37 +0200152 append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
153 LDST_SRCDST_BYTE_CONTEXT |
154 (ivoffset << LDST_OFFSET_SHIFT));
155 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
156 (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
Yuan Kang1acebad32011-07-15 11:21:42 +0800157}
158
159/*
Yuan Kangacdca312011-07-15 11:21:42 +0800160 * For ablkcipher encrypt and decrypt, read from req->src and
161 * write to req->dst
162 */
163static inline void ablkcipher_append_src_dst(u32 *desc)
164{
Kim Phillips70d793c2012-06-22 19:42:35 -0500165 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
166 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
167 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
168 KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
169 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
Yuan Kangacdca312011-07-15 11:21:42 +0800170}
171
172/*
Yuan Kang1acebad32011-07-15 11:21:42 +0800173 * If all data, including src (with assoc and iv) or dst (with iv only) are
174 * contiguous
175 */
176#define GIV_SRC_CONTIG 1
177#define GIV_DST_CONTIG (1 << 1)
178
Kim Phillips8e8ec592011-03-13 16:54:26 +0800179/*
180 * per-session context
181 */
182struct caam_ctx {
183 struct device *jrdev;
Yuan Kang1acebad32011-07-15 11:21:42 +0800184 u32 sh_desc_enc[DESC_MAX_USED_LEN];
185 u32 sh_desc_dec[DESC_MAX_USED_LEN];
186 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
187 dma_addr_t sh_desc_enc_dma;
188 dma_addr_t sh_desc_dec_dma;
189 dma_addr_t sh_desc_givenc_dma;
Kim Phillips8e8ec592011-03-13 16:54:26 +0800190 u32 class1_alg_type;
191 u32 class2_alg_type;
192 u32 alg_op;
Yuan Kang1acebad32011-07-15 11:21:42 +0800193 u8 key[CAAM_MAX_KEY_SIZE];
Yuan Kang885e9e22011-07-15 11:21:41 +0800194 dma_addr_t key_dma;
Kim Phillips8e8ec592011-03-13 16:54:26 +0800195 unsigned int enckeylen;
Kim Phillips8e8ec592011-03-13 16:54:26 +0800196 unsigned int split_key_len;
197 unsigned int split_key_pad_len;
198 unsigned int authsize;
199};
200
Yuan Kang1acebad32011-07-15 11:21:42 +0800201static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
Catalin Vasiledaebc462014-10-31 12:45:37 +0200202 int keys_fit_inline, bool is_rfc3686)
Yuan Kang1acebad32011-07-15 11:21:42 +0800203{
Catalin Vasiledaebc462014-10-31 12:45:37 +0200204 u32 *nonce;
205 unsigned int enckeylen = ctx->enckeylen;
206
207 /*
208 * RFC3686 specific:
209 * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
210 * | enckeylen = encryption key size + nonce size
211 */
212 if (is_rfc3686)
213 enckeylen -= CTR_RFC3686_NONCE_SIZE;
214
Yuan Kang1acebad32011-07-15 11:21:42 +0800215 if (keys_fit_inline) {
216 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
217 ctx->split_key_len, CLASS_2 |
218 KEY_DEST_MDHA_SPLIT | KEY_ENC);
219 append_key_as_imm(desc, (void *)ctx->key +
Catalin Vasiledaebc462014-10-31 12:45:37 +0200220 ctx->split_key_pad_len, enckeylen,
221 enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
Yuan Kang1acebad32011-07-15 11:21:42 +0800222 } else {
223 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
224 KEY_DEST_MDHA_SPLIT | KEY_ENC);
225 append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
Catalin Vasiledaebc462014-10-31 12:45:37 +0200226 enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
227 }
228
229 /* Load Counter into CONTEXT1 reg */
230 if (is_rfc3686) {
231 nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
232 enckeylen);
233 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
234 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
235 append_move(desc,
236 MOVE_SRC_OUTFIFO |
237 MOVE_DEST_CLASS1CTX |
238 (16 << MOVE_OFFSET_SHIFT) |
239 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
Yuan Kang1acebad32011-07-15 11:21:42 +0800240 }
241}
242
243static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
Catalin Vasiledaebc462014-10-31 12:45:37 +0200244 int keys_fit_inline, bool is_rfc3686)
Yuan Kang1acebad32011-07-15 11:21:42 +0800245{
246 u32 *key_jump_cmd;
247
Catalin Vasiledaebc462014-10-31 12:45:37 +0200248 /* Note: Context registers are saved. */
249 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
Yuan Kang1acebad32011-07-15 11:21:42 +0800250
251 /* Skip if already shared */
252 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
253 JUMP_COND_SHRD);
254
Catalin Vasiledaebc462014-10-31 12:45:37 +0200255 append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
Yuan Kang1acebad32011-07-15 11:21:42 +0800256
257 set_jump_tgt_here(desc, key_jump_cmd);
Yuan Kang1acebad32011-07-15 11:21:42 +0800258}
259
Horia Geantaae4a8252014-03-14 17:46:52 +0200260static int aead_null_set_sh_desc(struct crypto_aead *aead)
261{
Herbert Xuadd86d52015-05-11 17:47:50 +0800262 unsigned int ivsize = crypto_aead_ivsize(aead);
Horia Geantaae4a8252014-03-14 17:46:52 +0200263 struct caam_ctx *ctx = crypto_aead_ctx(aead);
264 struct device *jrdev = ctx->jrdev;
265 bool keys_fit_inline = false;
266 u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
267 u32 *desc;
268
269 /*
270 * Job Descriptor and Shared Descriptors
271 * must all fit into the 64-word Descriptor h/w Buffer
272 */
273 if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
274 ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
275 keys_fit_inline = true;
276
Herbert Xuf2147b82015-06-16 13:54:23 +0800277 /* old_aead_encrypt shared descriptor */
Horia Geantaae4a8252014-03-14 17:46:52 +0200278 desc = ctx->sh_desc_enc;
279
280 init_sh_desc(desc, HDR_SHARE_SERIAL);
281
282 /* Skip if already shared */
283 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
284 JUMP_COND_SHRD);
285 if (keys_fit_inline)
286 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
287 ctx->split_key_len, CLASS_2 |
288 KEY_DEST_MDHA_SPLIT | KEY_ENC);
289 else
290 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
291 KEY_DEST_MDHA_SPLIT | KEY_ENC);
292 set_jump_tgt_here(desc, key_jump_cmd);
293
294 /* cryptlen = seqoutlen - authsize */
295 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
296
297 /*
298 * NULL encryption; IV is zero
299 * assoclen = (assoclen + cryptlen) - cryptlen
300 */
301 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
302
303 /* read assoc before reading payload */
304 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
305 KEY_VLF);
306
307 /* Prepare to read and write cryptlen bytes */
308 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
309 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
310
311 /*
312 * MOVE_LEN opcode is not available in all SEC HW revisions,
313 * thus need to do some magic, i.e. self-patch the descriptor
314 * buffer.
315 */
316 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
317 MOVE_DEST_MATH3 |
318 (0x6 << MOVE_LEN_SHIFT));
319 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
320 MOVE_DEST_DESCBUF |
321 MOVE_WAITCOMP |
322 (0x8 << MOVE_LEN_SHIFT));
323
324 /* Class 2 operation */
325 append_operation(desc, ctx->class2_alg_type |
326 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
327
328 /* Read and write cryptlen bytes */
329 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
330
331 set_move_tgt_here(desc, read_move_cmd);
332 set_move_tgt_here(desc, write_move_cmd);
333 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
334 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
335 MOVE_AUX_LS);
336
337 /* Write ICV */
338 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
339 LDST_SRCDST_BYTE_CONTEXT);
340
341 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
342 desc_bytes(desc),
343 DMA_TO_DEVICE);
344 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
345 dev_err(jrdev, "unable to map shared descriptor\n");
346 return -ENOMEM;
347 }
348#ifdef DEBUG
349 print_hex_dump(KERN_ERR,
350 "aead null enc shdesc@"__stringify(__LINE__)": ",
351 DUMP_PREFIX_ADDRESS, 16, 4, desc,
352 desc_bytes(desc), 1);
353#endif
354
355 /*
356 * Job Descriptor and Shared Descriptors
357 * must all fit into the 64-word Descriptor h/w Buffer
358 */
Vakul Garg80cd88f2014-05-09 20:34:40 -0500359 keys_fit_inline = false;
Horia Geantaae4a8252014-03-14 17:46:52 +0200360 if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
361 ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
362 keys_fit_inline = true;
363
364 desc = ctx->sh_desc_dec;
365
Herbert Xuf2147b82015-06-16 13:54:23 +0800366 /* old_aead_decrypt shared descriptor */
Horia Geantaae4a8252014-03-14 17:46:52 +0200367 init_sh_desc(desc, HDR_SHARE_SERIAL);
368
369 /* Skip if already shared */
370 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
371 JUMP_COND_SHRD);
372 if (keys_fit_inline)
373 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
374 ctx->split_key_len, CLASS_2 |
375 KEY_DEST_MDHA_SPLIT | KEY_ENC);
376 else
377 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
378 KEY_DEST_MDHA_SPLIT | KEY_ENC);
379 set_jump_tgt_here(desc, key_jump_cmd);
380
381 /* Class 2 operation */
382 append_operation(desc, ctx->class2_alg_type |
383 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
384
385 /* assoclen + cryptlen = seqinlen - ivsize - authsize */
386 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
Herbert Xuadd86d52015-05-11 17:47:50 +0800387 ctx->authsize + ivsize);
Horia Geantaae4a8252014-03-14 17:46:52 +0200388 /* assoclen = (assoclen + cryptlen) - cryptlen */
389 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
390 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
391
392 /* read assoc before reading payload */
393 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
394 KEY_VLF);
395
396 /* Prepare to read and write cryptlen bytes */
397 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
398 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
399
400 /*
401 * MOVE_LEN opcode is not available in all SEC HW revisions,
402 * thus need to do some magic, i.e. self-patch the descriptor
403 * buffer.
404 */
405 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
406 MOVE_DEST_MATH2 |
407 (0x6 << MOVE_LEN_SHIFT));
408 write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
409 MOVE_DEST_DESCBUF |
410 MOVE_WAITCOMP |
411 (0x8 << MOVE_LEN_SHIFT));
412
413 /* Read and write cryptlen bytes */
414 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
415
416 /*
417 * Insert a NOP here, since we need at least 4 instructions between
418 * code patching the descriptor buffer and the location being patched.
419 */
420 jump_cmd = append_jump(desc, JUMP_TEST_ALL);
421 set_jump_tgt_here(desc, jump_cmd);
422
423 set_move_tgt_here(desc, read_move_cmd);
424 set_move_tgt_here(desc, write_move_cmd);
425 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
426 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
427 MOVE_AUX_LS);
428 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
429
430 /* Load ICV */
431 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
432 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
433
434 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
435 desc_bytes(desc),
436 DMA_TO_DEVICE);
437 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
438 dev_err(jrdev, "unable to map shared descriptor\n");
439 return -ENOMEM;
440 }
441#ifdef DEBUG
442 print_hex_dump(KERN_ERR,
443 "aead null dec shdesc@"__stringify(__LINE__)": ",
444 DUMP_PREFIX_ADDRESS, 16, 4, desc,
445 desc_bytes(desc), 1);
446#endif
447
448 return 0;
449}
450
Yuan Kang1acebad32011-07-15 11:21:42 +0800451static int aead_set_sh_desc(struct crypto_aead *aead)
452{
Herbert Xuadd86d52015-05-11 17:47:50 +0800453 unsigned int ivsize = crypto_aead_ivsize(aead);
Yuan Kang1acebad32011-07-15 11:21:42 +0800454 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Catalin Vasiledaebc462014-10-31 12:45:37 +0200455 struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
456 const char *alg_name = crypto_tfm_alg_name(ctfm);
Yuan Kang1acebad32011-07-15 11:21:42 +0800457 struct device *jrdev = ctx->jrdev;
Catalin Vasiledaebc462014-10-31 12:45:37 +0200458 bool keys_fit_inline;
Yuan Kang1acebad32011-07-15 11:21:42 +0800459 u32 geniv, moveiv;
Catalin Vasiledaebc462014-10-31 12:45:37 +0200460 u32 ctx1_iv_off = 0;
Yuan Kang1acebad32011-07-15 11:21:42 +0800461 u32 *desc;
Catalin Vasiledaebc462014-10-31 12:45:37 +0200462 const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
463 OP_ALG_AAI_CTR_MOD128);
464 const bool is_rfc3686 = (ctr_mode &&
465 (strstr(alg_name, "rfc3686") != NULL));
Yuan Kang1acebad32011-07-15 11:21:42 +0800466
Horia Geantaae4a8252014-03-14 17:46:52 +0200467 if (!ctx->authsize)
Yuan Kang1acebad32011-07-15 11:21:42 +0800468 return 0;
469
Horia Geantaae4a8252014-03-14 17:46:52 +0200470 /* NULL encryption / decryption */
471 if (!ctx->enckeylen)
472 return aead_null_set_sh_desc(aead);
473
Yuan Kang1acebad32011-07-15 11:21:42 +0800474 /*
Catalin Vasiledaebc462014-10-31 12:45:37 +0200475 * AES-CTR needs to load IV in CONTEXT1 reg
476 * at an offset of 128bits (16bytes)
477 * CONTEXT1[255:128] = IV
478 */
479 if (ctr_mode)
480 ctx1_iv_off = 16;
481
482 /*
483 * RFC3686 specific:
484 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
485 */
486 if (is_rfc3686)
487 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
488
489 /*
Yuan Kang1acebad32011-07-15 11:21:42 +0800490 * Job Descriptor and Shared Descriptors
491 * must all fit into the 64-word Descriptor h/w Buffer
492 */
Catalin Vasiledaebc462014-10-31 12:45:37 +0200493 keys_fit_inline = false;
Yuan Kang1acebad32011-07-15 11:21:42 +0800494 if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
Catalin Vasiledaebc462014-10-31 12:45:37 +0200495 ctx->split_key_pad_len + ctx->enckeylen +
496 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
Yuan Kang1acebad32011-07-15 11:21:42 +0800497 CAAM_DESC_BYTES_MAX)
Kim Phillips2af8f4a2012-09-07 04:17:03 +0800498 keys_fit_inline = true;
Yuan Kang1acebad32011-07-15 11:21:42 +0800499
Herbert Xuf2147b82015-06-16 13:54:23 +0800500 /* old_aead_encrypt shared descriptor */
Yuan Kang1acebad32011-07-15 11:21:42 +0800501 desc = ctx->sh_desc_enc;
502
Catalin Vasiledaebc462014-10-31 12:45:37 +0200503 /* Note: Context registers are saved. */
504 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
Yuan Kang1acebad32011-07-15 11:21:42 +0800505
506 /* Class 2 operation */
507 append_operation(desc, ctx->class2_alg_type |
508 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
509
510 /* cryptlen = seqoutlen - authsize */
511 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
512
513 /* assoclen + cryptlen = seqinlen - ivsize */
Herbert Xuadd86d52015-05-11 17:47:50 +0800514 append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize);
Yuan Kang1acebad32011-07-15 11:21:42 +0800515
Horia Geanta4464a7d2014-03-14 17:46:49 +0200516 /* assoclen = (assoclen + cryptlen) - cryptlen */
Yuan Kang1acebad32011-07-15 11:21:42 +0800517 append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
518
519 /* read assoc before reading payload */
520 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
521 KEY_VLF);
Herbert Xuadd86d52015-05-11 17:47:50 +0800522 aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
Catalin Vasiledaebc462014-10-31 12:45:37 +0200523
524 /* Load Counter into CONTEXT1 reg */
525 if (is_rfc3686)
526 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
527 LDST_CLASS_1_CCB |
528 LDST_SRCDST_BYTE_CONTEXT |
529 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
530 LDST_OFFSET_SHIFT));
Yuan Kang1acebad32011-07-15 11:21:42 +0800531
532 /* Class 1 operation */
533 append_operation(desc, ctx->class1_alg_type |
534 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
535
536 /* Read and write cryptlen bytes */
537 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
538 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
539 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
540
541 /* Write ICV */
542 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
543 LDST_SRCDST_BYTE_CONTEXT);
544
545 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
546 desc_bytes(desc),
547 DMA_TO_DEVICE);
548 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
549 dev_err(jrdev, "unable to map shared descriptor\n");
550 return -ENOMEM;
551 }
552#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +0300553 print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad32011-07-15 11:21:42 +0800554 DUMP_PREFIX_ADDRESS, 16, 4, desc,
555 desc_bytes(desc), 1);
556#endif
557
558 /*
559 * Job Descriptor and Shared Descriptors
560 * must all fit into the 64-word Descriptor h/w Buffer
561 */
Vakul Garg80cd88f2014-05-09 20:34:40 -0500562 keys_fit_inline = false;
Yuan Kang1acebad32011-07-15 11:21:42 +0800563 if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
Catalin Vasiledaebc462014-10-31 12:45:37 +0200564 ctx->split_key_pad_len + ctx->enckeylen +
565 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
Yuan Kang1acebad32011-07-15 11:21:42 +0800566 CAAM_DESC_BYTES_MAX)
Kim Phillips2af8f4a2012-09-07 04:17:03 +0800567 keys_fit_inline = true;
Yuan Kang1acebad32011-07-15 11:21:42 +0800568
Herbert Xuf2147b82015-06-16 13:54:23 +0800569 /* old_aead_decrypt shared descriptor */
Yuan Kang1acebad32011-07-15 11:21:42 +0800570 desc = ctx->sh_desc_dec;
571
Catalin Vasiledaebc462014-10-31 12:45:37 +0200572 /* Note: Context registers are saved. */
573 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
Yuan Kang1acebad32011-07-15 11:21:42 +0800574
575 /* Class 2 operation */
576 append_operation(desc, ctx->class2_alg_type |
577 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
578
Horia Geanta4464a7d2014-03-14 17:46:49 +0200579 /* assoclen + cryptlen = seqinlen - ivsize - authsize */
Yuan Kang1acebad32011-07-15 11:21:42 +0800580 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
Herbert Xuadd86d52015-05-11 17:47:50 +0800581 ctx->authsize + ivsize);
Yuan Kang1acebad32011-07-15 11:21:42 +0800582 /* assoclen = (assoclen + cryptlen) - cryptlen */
583 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
584 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
585
586 /* read assoc before reading payload */
587 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
588 KEY_VLF);
589
Herbert Xuadd86d52015-05-11 17:47:50 +0800590 aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
Yuan Kang1acebad32011-07-15 11:21:42 +0800591
Catalin Vasiledaebc462014-10-31 12:45:37 +0200592 /* Load Counter into CONTEXT1 reg */
593 if (is_rfc3686)
594 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
595 LDST_CLASS_1_CCB |
596 LDST_SRCDST_BYTE_CONTEXT |
597 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
598 LDST_OFFSET_SHIFT));
599
600 /* Choose operation */
601 if (ctr_mode)
602 append_operation(desc, ctx->class1_alg_type |
603 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
604 else
605 append_dec_op1(desc, ctx->class1_alg_type);
Yuan Kang1acebad32011-07-15 11:21:42 +0800606
607 /* Read and write cryptlen bytes */
608 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
609 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
610 aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
611
612 /* Load ICV */
613 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
614 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
Yuan Kang1acebad32011-07-15 11:21:42 +0800615
616 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
617 desc_bytes(desc),
618 DMA_TO_DEVICE);
619 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
620 dev_err(jrdev, "unable to map shared descriptor\n");
621 return -ENOMEM;
622 }
623#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +0300624 print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad32011-07-15 11:21:42 +0800625 DUMP_PREFIX_ADDRESS, 16, 4, desc,
626 desc_bytes(desc), 1);
627#endif
628
629 /*
630 * Job Descriptor and Shared Descriptors
631 * must all fit into the 64-word Descriptor h/w Buffer
632 */
Vakul Garg80cd88f2014-05-09 20:34:40 -0500633 keys_fit_inline = false;
Yuan Kang1acebad32011-07-15 11:21:42 +0800634 if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
Catalin Vasiledaebc462014-10-31 12:45:37 +0200635 ctx->split_key_pad_len + ctx->enckeylen +
636 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
Yuan Kang1acebad32011-07-15 11:21:42 +0800637 CAAM_DESC_BYTES_MAX)
Kim Phillips2af8f4a2012-09-07 04:17:03 +0800638 keys_fit_inline = true;
Yuan Kang1acebad32011-07-15 11:21:42 +0800639
640 /* aead_givencrypt shared descriptor */
641 desc = ctx->sh_desc_givenc;
642
Catalin Vasiledaebc462014-10-31 12:45:37 +0200643 /* Note: Context registers are saved. */
644 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
Yuan Kang1acebad32011-07-15 11:21:42 +0800645
646 /* Generate IV */
647 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
648 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
Herbert Xuadd86d52015-05-11 17:47:50 +0800649 NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
Yuan Kang1acebad32011-07-15 11:21:42 +0800650 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
651 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
652 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
Catalin Vasiledaebc462014-10-31 12:45:37 +0200653 append_move(desc, MOVE_WAITCOMP |
654 MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
655 (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
Herbert Xuadd86d52015-05-11 17:47:50 +0800656 (ivsize << MOVE_LEN_SHIFT));
Yuan Kang1acebad32011-07-15 11:21:42 +0800657 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
658
659 /* Copy IV to class 1 context */
Catalin Vasiledaebc462014-10-31 12:45:37 +0200660 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
661 (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
Herbert Xuadd86d52015-05-11 17:47:50 +0800662 (ivsize << MOVE_LEN_SHIFT));
Yuan Kang1acebad32011-07-15 11:21:42 +0800663
664 /* Return to encryption */
665 append_operation(desc, ctx->class2_alg_type |
666 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
667
668 /* ivsize + cryptlen = seqoutlen - authsize */
669 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
670
671 /* assoclen = seqinlen - (ivsize + cryptlen) */
672 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
673
674 /* read assoc before reading payload */
675 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
676 KEY_VLF);
677
Catalin Vasiledaebc462014-10-31 12:45:37 +0200678 /* Copy iv from outfifo to class 2 fifo */
Yuan Kang1acebad32011-07-15 11:21:42 +0800679 moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
Herbert Xuadd86d52015-05-11 17:47:50 +0800680 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
Yuan Kang1acebad32011-07-15 11:21:42 +0800681 append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
682 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
Herbert Xuadd86d52015-05-11 17:47:50 +0800683 append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
Yuan Kang1acebad32011-07-15 11:21:42 +0800684 LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
685
Catalin Vasiledaebc462014-10-31 12:45:37 +0200686 /* Load Counter into CONTEXT1 reg */
687 if (is_rfc3686)
688 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
689 LDST_CLASS_1_CCB |
690 LDST_SRCDST_BYTE_CONTEXT |
691 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
692 LDST_OFFSET_SHIFT));
693
Yuan Kang1acebad32011-07-15 11:21:42 +0800694 /* Class 1 operation */
695 append_operation(desc, ctx->class1_alg_type |
696 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
697
698 /* Will write ivsize + cryptlen */
699 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
700
701 	/* No need to reload iv */
Herbert Xuadd86d52015-05-11 17:47:50 +0800702 append_seq_fifo_load(desc, ivsize,
Yuan Kang1acebad32011-07-15 11:21:42 +0800703 FIFOLD_CLASS_SKIP);
704
705 /* Will read cryptlen */
706 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
707 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
708
709 /* Write ICV */
710 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
711 LDST_SRCDST_BYTE_CONTEXT);
712
713 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
714 desc_bytes(desc),
715 DMA_TO_DEVICE);
716 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
717 dev_err(jrdev, "unable to map shared descriptor\n");
718 return -ENOMEM;
719 }
720#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +0300721 print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad32011-07-15 11:21:42 +0800722 DUMP_PREFIX_ADDRESS, 16, 4, desc,
723 desc_bytes(desc), 1);
724#endif
725
726 return 0;
727}
728
Yuan Kang0e479302011-07-15 11:21:41 +0800729static int aead_setauthsize(struct crypto_aead *authenc,
Kim Phillips8e8ec592011-03-13 16:54:26 +0800730 unsigned int authsize)
731{
732 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
733
734 ctx->authsize = authsize;
Yuan Kang1acebad32011-07-15 11:21:42 +0800735 aead_set_sh_desc(authenc);
Kim Phillips8e8ec592011-03-13 16:54:26 +0800736
737 return 0;
738}
739
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300740static int gcm_set_sh_desc(struct crypto_aead *aead)
741{
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300742 struct caam_ctx *ctx = crypto_aead_ctx(aead);
743 struct device *jrdev = ctx->jrdev;
744 bool keys_fit_inline = false;
745 u32 *key_jump_cmd, *zero_payload_jump_cmd,
746 *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
747 u32 *desc;
748
749 if (!ctx->enckeylen || !ctx->authsize)
750 return 0;
751
752 /*
753 * AES GCM encrypt shared descriptor
754 * Job Descriptor and Shared Descriptor
755 * must fit into the 64-word Descriptor h/w Buffer
756 */
Herbert Xuf2147b82015-06-16 13:54:23 +0800757 if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300758 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
759 keys_fit_inline = true;
760
761 desc = ctx->sh_desc_enc;
762
763 init_sh_desc(desc, HDR_SHARE_SERIAL);
764
765 /* skip key loading if they are loaded due to sharing */
766 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
767 JUMP_COND_SHRD | JUMP_COND_SELF);
768 if (keys_fit_inline)
769 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
770 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
771 else
772 append_key(desc, ctx->key_dma, ctx->enckeylen,
773 CLASS_1 | KEY_DEST_CLASS_REG);
774 set_jump_tgt_here(desc, key_jump_cmd);
775
776 /* class 1 operation */
777 append_operation(desc, ctx->class1_alg_type |
778 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
779
Herbert Xuf2147b82015-06-16 13:54:23 +0800780 /* if assoclen + cryptlen is ZERO, skip to ICV write */
781 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
782 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
783 JUMP_COND_MATH_Z);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300784
785 /* if assoclen is ZERO, skip reading the assoc data */
Herbert Xuf2147b82015-06-16 13:54:23 +0800786 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300787 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
Herbert Xuf2147b82015-06-16 13:54:23 +0800788 JUMP_COND_MATH_Z);
789
790 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
791
792 /* skip assoc data */
793 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
794
795 /* cryptlen = seqinlen - assoclen */
796 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
797
798 /* if cryptlen is ZERO jump to zero-payload commands */
799 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
800 JUMP_COND_MATH_Z);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300801
802 /* read assoc data */
803 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
804 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
805 set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
806
Herbert Xuf2147b82015-06-16 13:54:23 +0800807 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300808
809 /* write encrypted data */
810 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
811
812 /* read payload data */
813 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
814 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
815
816 /* jump the zero-payload commands */
Herbert Xuf2147b82015-06-16 13:54:23 +0800817 append_jump(desc, JUMP_TEST_ALL | 2);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300818
819 /* zero-payload commands */
820 set_jump_tgt_here(desc, zero_payload_jump_cmd);
821
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300822 /* read assoc data */
823 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
824 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
825
Herbert Xuf2147b82015-06-16 13:54:23 +0800826 /* There is no input data */
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300827 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300828
829 /* write ICV */
830 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
831 LDST_SRCDST_BYTE_CONTEXT);
832
833 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
834 desc_bytes(desc),
835 DMA_TO_DEVICE);
836 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
837 dev_err(jrdev, "unable to map shared descriptor\n");
838 return -ENOMEM;
839 }
840#ifdef DEBUG
841 print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
842 DUMP_PREFIX_ADDRESS, 16, 4, desc,
843 desc_bytes(desc), 1);
844#endif
845
846 /*
847 * Job Descriptor and Shared Descriptors
848 * must all fit into the 64-word Descriptor h/w Buffer
849 */
850 keys_fit_inline = false;
Herbert Xuf2147b82015-06-16 13:54:23 +0800851 if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300852 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
853 keys_fit_inline = true;
854
855 desc = ctx->sh_desc_dec;
856
857 init_sh_desc(desc, HDR_SHARE_SERIAL);
858
859 /* skip key loading if they are loaded due to sharing */
860 key_jump_cmd = append_jump(desc, JUMP_JSL |
861 JUMP_TEST_ALL | JUMP_COND_SHRD |
862 JUMP_COND_SELF);
863 if (keys_fit_inline)
864 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
865 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
866 else
867 append_key(desc, ctx->key_dma, ctx->enckeylen,
868 CLASS_1 | KEY_DEST_CLASS_REG);
869 set_jump_tgt_here(desc, key_jump_cmd);
870
871 /* class 1 operation */
872 append_operation(desc, ctx->class1_alg_type |
873 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
874
Herbert Xuf2147b82015-06-16 13:54:23 +0800875 /* if assoclen is ZERO, skip reading the assoc data */
876 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300877 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
Herbert Xuf2147b82015-06-16 13:54:23 +0800878 JUMP_COND_MATH_Z);
879
880 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
881
882 /* skip assoc data */
883 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
884
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300885 /* read assoc data */
886 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
887 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
Herbert Xuf2147b82015-06-16 13:54:23 +0800888
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300889 set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
890
Herbert Xuf2147b82015-06-16 13:54:23 +0800891 /* cryptlen = seqoutlen - assoclen */
892 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
893
894 /* jump to zero-payload command if cryptlen is zero */
895 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
896 JUMP_COND_MATH_Z);
897
898 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300899
900 /* store encrypted data */
901 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
902
903 /* read payload data */
904 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
905 FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
906
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300907 /* zero-payload command */
908 set_jump_tgt_here(desc, zero_payload_jump_cmd);
909
Tudor Ambarus3ef8d942014-10-23 16:11:23 +0300910 /* read ICV */
911 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
912 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
913
914 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
915 desc_bytes(desc),
916 DMA_TO_DEVICE);
917 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
918 dev_err(jrdev, "unable to map shared descriptor\n");
919 return -ENOMEM;
920 }
921#ifdef DEBUG
922 print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
923 DUMP_PREFIX_ADDRESS, 16, 4, desc,
924 desc_bytes(desc), 1);
925#endif
926
927 return 0;
928}
929
930static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
931{
932 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
933
934 ctx->authsize = authsize;
935 gcm_set_sh_desc(authenc);
936
937 return 0;
938}
939
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300940static int rfc4106_set_sh_desc(struct crypto_aead *aead)
941{
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300942 struct caam_ctx *ctx = crypto_aead_ctx(aead);
943 struct device *jrdev = ctx->jrdev;
944 bool keys_fit_inline = false;
Herbert Xuf2147b82015-06-16 13:54:23 +0800945 u32 *key_jump_cmd;
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300946 u32 *desc;
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300947
948 if (!ctx->enckeylen || !ctx->authsize)
949 return 0;
950
951 /*
952 * RFC4106 encrypt shared descriptor
953 * Job Descriptor and Shared Descriptor
954 * must fit into the 64-word Descriptor h/w Buffer
955 */
Herbert Xuf2147b82015-06-16 13:54:23 +0800956 if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300957 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
958 keys_fit_inline = true;
959
960 desc = ctx->sh_desc_enc;
961
962 init_sh_desc(desc, HDR_SHARE_SERIAL);
963
964 /* Skip key loading if it is loaded due to sharing */
965 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
966 JUMP_COND_SHRD);
967 if (keys_fit_inline)
968 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
969 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
970 else
971 append_key(desc, ctx->key_dma, ctx->enckeylen,
972 CLASS_1 | KEY_DEST_CLASS_REG);
973 set_jump_tgt_here(desc, key_jump_cmd);
974
975 /* Class 1 operation */
976 append_operation(desc, ctx->class1_alg_type |
977 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
978
Herbert Xu46218752015-07-09 07:17:33 +0800979 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300980 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
981
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300982 /* Read assoc data */
983 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
984 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
985
Herbert Xu46218752015-07-09 07:17:33 +0800986 /* Skip IV */
987 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
Herbert Xuf2147b82015-06-16 13:54:23 +0800988
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300989 /* Will read cryptlen bytes */
Herbert Xuf2147b82015-06-16 13:54:23 +0800990 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300991
Horia Geantă4aad0cc2015-07-30 22:11:18 +0300992 	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
993 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
Tudor Ambarusbac68f22014-10-23 16:14:03 +0300994
Herbert Xu46218752015-07-09 07:17:33 +0800995 /* Skip assoc data */
996 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
997
998 /* cryptlen = seqoutlen - assoclen */
Horia Geantă4aad0cc2015-07-30 22:11:18 +0300999 	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
Herbert Xu46218752015-07-09 07:17:33 +08001000
1001 /* Write encrypted data */
1002 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1003
Horia Geantă4aad0cc2015-07-30 22:11:18 +03001004 	/* Read payload data */
1005 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1006 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
1007
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001008 /* Write ICV */
1009 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1010 LDST_SRCDST_BYTE_CONTEXT);
1011
1012 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1013 desc_bytes(desc),
1014 DMA_TO_DEVICE);
1015 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1016 dev_err(jrdev, "unable to map shared descriptor\n");
1017 return -ENOMEM;
1018 }
1019#ifdef DEBUG
1020 print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
1021 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1022 desc_bytes(desc), 1);
1023#endif
1024
1025 /*
1026 * Job Descriptor and Shared Descriptors
1027 * must all fit into the 64-word Descriptor h/w Buffer
1028 */
1029 keys_fit_inline = false;
1030 if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
1031 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1032 keys_fit_inline = true;
1033
1034 desc = ctx->sh_desc_dec;
1035
1036 init_sh_desc(desc, HDR_SHARE_SERIAL);
1037
1038 /* Skip key loading if it is loaded due to sharing */
1039 key_jump_cmd = append_jump(desc, JUMP_JSL |
1040 JUMP_TEST_ALL | JUMP_COND_SHRD);
1041 if (keys_fit_inline)
1042 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1043 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1044 else
1045 append_key(desc, ctx->key_dma, ctx->enckeylen,
1046 CLASS_1 | KEY_DEST_CLASS_REG);
1047 set_jump_tgt_here(desc, key_jump_cmd);
1048
1049 /* Class 1 operation */
1050 append_operation(desc, ctx->class1_alg_type |
1051 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1052
Herbert Xu46218752015-07-09 07:17:33 +08001053 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
Herbert Xuf2147b82015-06-16 13:54:23 +08001054 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001055
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001056 /* Read assoc data */
1057 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1058 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
1059
Herbert Xu46218752015-07-09 07:17:33 +08001060 /* Skip IV */
1061 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
Herbert Xuf2147b82015-06-16 13:54:23 +08001062
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001063 /* Will read cryptlen bytes */
Herbert Xu46218752015-07-09 07:17:33 +08001064 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001065
Horia Geantă4aad0cc2015-07-30 22:11:18 +03001066 	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
1067 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001068
Herbert Xu46218752015-07-09 07:17:33 +08001069 /* Skip assoc data */
1070 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
1071
1072 /* Will write cryptlen bytes */
1073 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1074
1075 /* Store payload data */
1076 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1077
Horia Geantă4aad0cc2015-07-30 22:11:18 +03001078 	/* Read encrypted data */
1079 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1080 FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
1081
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001082 /* Read ICV */
1083 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1084 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1085
1086 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1087 desc_bytes(desc),
1088 DMA_TO_DEVICE);
1089 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1090 dev_err(jrdev, "unable to map shared descriptor\n");
1091 return -ENOMEM;
1092 }
1093#ifdef DEBUG
1094 print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
1095 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1096 desc_bytes(desc), 1);
1097#endif
1098
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001099 return 0;
1100}
1101
1102static int rfc4106_setauthsize(struct crypto_aead *authenc,
1103 unsigned int authsize)
1104{
1105 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
1106
1107 ctx->authsize = authsize;
1108 rfc4106_set_sh_desc(authenc);
1109
1110 return 0;
1111}
1112
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001113static int rfc4543_set_sh_desc(struct crypto_aead *aead)
1114{
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001115 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1116 struct device *jrdev = ctx->jrdev;
1117 bool keys_fit_inline = false;
Herbert Xuf2147b82015-06-16 13:54:23 +08001118 u32 *key_jump_cmd;
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001119 u32 *read_move_cmd, *write_move_cmd;
1120 u32 *desc;
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001121
1122 if (!ctx->enckeylen || !ctx->authsize)
1123 return 0;
1124
1125 /*
1126 * RFC4543 encrypt shared descriptor
1127 * Job Descriptor and Shared Descriptor
1128 * must fit into the 64-word Descriptor h/w Buffer
1129 */
Herbert Xuf2147b82015-06-16 13:54:23 +08001130 if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001131 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1132 keys_fit_inline = true;
1133
1134 desc = ctx->sh_desc_enc;
1135
1136 init_sh_desc(desc, HDR_SHARE_SERIAL);
1137
1138 /* Skip key loading if it is loaded due to sharing */
1139 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1140 JUMP_COND_SHRD);
1141 if (keys_fit_inline)
1142 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1143 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1144 else
1145 append_key(desc, ctx->key_dma, ctx->enckeylen,
1146 CLASS_1 | KEY_DEST_CLASS_REG);
1147 set_jump_tgt_here(desc, key_jump_cmd);
1148
1149 /* Class 1 operation */
1150 append_operation(desc, ctx->class1_alg_type |
1151 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1152
Herbert Xuf2147b82015-06-16 13:54:23 +08001153 /* assoclen + cryptlen = seqinlen */
1154 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001155
1156 /*
1157 * MOVE_LEN opcode is not available in all SEC HW revisions,
1158 * thus need to do some magic, i.e. self-patch the descriptor
1159 * buffer.
1160 */
1161 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1162 (0x6 << MOVE_LEN_SHIFT));
1163 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1164 (0x8 << MOVE_LEN_SHIFT));
1165
Herbert Xuf2147b82015-06-16 13:54:23 +08001166 /* Will read assoclen + cryptlen bytes */
1167 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001168
Herbert Xuf2147b82015-06-16 13:54:23 +08001169 /* Will write assoclen + cryptlen bytes */
1170 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1171
1172 /* Read and write assoclen + cryptlen bytes */
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001173 aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
1174
1175 set_move_tgt_here(desc, read_move_cmd);
1176 set_move_tgt_here(desc, write_move_cmd);
1177 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1178 /* Move payload data to OFIFO */
1179 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1180
1181 /* Write ICV */
1182 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1183 LDST_SRCDST_BYTE_CONTEXT);
1184
1185 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1186 desc_bytes(desc),
1187 DMA_TO_DEVICE);
1188 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1189 dev_err(jrdev, "unable to map shared descriptor\n");
1190 return -ENOMEM;
1191 }
1192#ifdef DEBUG
1193 print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
1194 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1195 desc_bytes(desc), 1);
1196#endif
1197
1198 /*
1199 * Job Descriptor and Shared Descriptors
1200 * must all fit into the 64-word Descriptor h/w Buffer
1201 */
1202 keys_fit_inline = false;
Herbert Xuf2147b82015-06-16 13:54:23 +08001203 if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001204 ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1205 keys_fit_inline = true;
1206
1207 desc = ctx->sh_desc_dec;
1208
1209 init_sh_desc(desc, HDR_SHARE_SERIAL);
1210
1211 /* Skip key loading if it is loaded due to sharing */
1212 key_jump_cmd = append_jump(desc, JUMP_JSL |
1213 JUMP_TEST_ALL | JUMP_COND_SHRD);
1214 if (keys_fit_inline)
1215 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1216 ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1217 else
1218 append_key(desc, ctx->key_dma, ctx->enckeylen,
1219 CLASS_1 | KEY_DEST_CLASS_REG);
1220 set_jump_tgt_here(desc, key_jump_cmd);
1221
1222 /* Class 1 operation */
1223 append_operation(desc, ctx->class1_alg_type |
1224 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1225
Herbert Xuf2147b82015-06-16 13:54:23 +08001226 /* assoclen + cryptlen = seqoutlen */
1227 append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001228
1229 /*
1230 * MOVE_LEN opcode is not available in all SEC HW revisions,
1231 * thus need to do some magic, i.e. self-patch the descriptor
1232 * buffer.
1233 */
1234 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1235 (0x6 << MOVE_LEN_SHIFT));
1236 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1237 (0x8 << MOVE_LEN_SHIFT));
1238
Herbert Xuf2147b82015-06-16 13:54:23 +08001239 /* Will read assoclen + cryptlen bytes */
1240 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001241
Herbert Xuf2147b82015-06-16 13:54:23 +08001242 /* Will write assoclen + cryptlen bytes */
1243 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001244
1245 /* Store payload data */
1246 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1247
Herbert Xuf2147b82015-06-16 13:54:23 +08001248 /* In-snoop assoclen + cryptlen data */
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001249 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
1250 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
1251
1252 set_move_tgt_here(desc, read_move_cmd);
1253 set_move_tgt_here(desc, write_move_cmd);
1254 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1255 /* Move payload data to OFIFO */
1256 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1257 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1258
1259 /* Read ICV */
1260 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1261 FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1262
1263 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1264 desc_bytes(desc),
1265 DMA_TO_DEVICE);
1266 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1267 dev_err(jrdev, "unable to map shared descriptor\n");
1268 return -ENOMEM;
1269 }
1270#ifdef DEBUG
1271 print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
1272 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1273 desc_bytes(desc), 1);
1274#endif
1275
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001276 return 0;
1277}
1278
1279static int rfc4543_setauthsize(struct crypto_aead *authenc,
1280 unsigned int authsize)
1281{
1282 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
1283
1284 ctx->authsize = authsize;
1285 rfc4543_set_sh_desc(authenc);
1286
1287 return 0;
1288}
1289
Yuan Kang4c1ec1f2012-06-22 19:48:45 -05001290static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
1291 u32 authkeylen)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001292{
Yuan Kang4c1ec1f2012-06-22 19:48:45 -05001293 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
1294 ctx->split_key_pad_len, key_in, authkeylen,
1295 ctx->alg_op);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001296}
1297
Yuan Kang0e479302011-07-15 11:21:41 +08001298static int aead_setkey(struct crypto_aead *aead,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001299 const u8 *key, unsigned int keylen)
1300{
1301 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
1302 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
1303 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1304 struct device *jrdev = ctx->jrdev;
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001305 struct crypto_authenc_keys keys;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001306 int ret = 0;
1307
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001308 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001309 goto badkey;
1310
1311 /* Pick class 2 key length from algorithm submask */
1312 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1313 OP_ALG_ALGSEL_SHIFT] * 2;
1314 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
1315
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001316 if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1317 goto badkey;
1318
Kim Phillips8e8ec592011-03-13 16:54:26 +08001319#ifdef DEBUG
1320 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001321 keys.authkeylen + keys.enckeylen, keys.enckeylen,
1322 keys.authkeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001323 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
1324 ctx->split_key_len, ctx->split_key_pad_len);
Alex Porosanu514df282013-08-14 18:56:45 +03001325 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001326 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1327#endif
Kim Phillips8e8ec592011-03-13 16:54:26 +08001328
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001329 ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001330 if (ret) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001331 goto badkey;
1332 }
1333
1334 	/* append encryption key after the auth split key */
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001335 memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001336
Yuan Kang885e9e22011-07-15 11:21:41 +08001337 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001338 keys.enckeylen, DMA_TO_DEVICE);
Yuan Kang885e9e22011-07-15 11:21:41 +08001339 if (dma_mapping_error(jrdev, ctx->key_dma)) {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001340 dev_err(jrdev, "unable to map key i/o memory\n");
Kim Phillips8e8ec592011-03-13 16:54:26 +08001341 return -ENOMEM;
1342 }
1343#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001344 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001345 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001346 ctx->split_key_pad_len + keys.enckeylen, 1);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001347#endif
1348
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001349 ctx->enckeylen = keys.enckeylen;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001350
Yuan Kang1acebad32011-07-15 11:21:42 +08001351 ret = aead_set_sh_desc(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001352 if (ret) {
Yuan Kang885e9e22011-07-15 11:21:41 +08001353 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
Horia Geanta4e6e0b22013-12-19 17:27:35 +02001354 keys.enckeylen, DMA_TO_DEVICE);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001355 }
1356
1357 return ret;
1358badkey:
1359 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1360 return -EINVAL;
1361}
1362
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03001363static int gcm_setkey(struct crypto_aead *aead,
1364 const u8 *key, unsigned int keylen)
1365{
1366 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1367 struct device *jrdev = ctx->jrdev;
1368 int ret = 0;
1369
1370#ifdef DEBUG
1371 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1372 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1373#endif
1374
1375 memcpy(ctx->key, key, keylen);
1376 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1377 DMA_TO_DEVICE);
1378 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1379 dev_err(jrdev, "unable to map key i/o memory\n");
1380 return -ENOMEM;
1381 }
1382 ctx->enckeylen = keylen;
1383
1384 ret = gcm_set_sh_desc(aead);
1385 if (ret) {
1386 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1387 DMA_TO_DEVICE);
1388 }
1389
1390 return ret;
1391}
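
/*
 * Hedged usage sketch (illustrative, not part of this driver): a kernel
 * client reaches gcm_setkey() above through the generic AEAD API once the
 * driver's "gcm(aes)" implementation is registered. The key and tag sizes
 * below are assumptions made for the example.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	int err;
 *
 *	if (!IS_ERR(tfm)) {
 *		err = crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *		if (!err)
 *			err = crypto_aead_setauthsize(tfm, AES_BLOCK_SIZE);
 *		crypto_free_aead(tfm);
 *	}
 */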
1392
Tudor Ambarusbac68f22014-10-23 16:14:03 +03001393static int rfc4106_setkey(struct crypto_aead *aead,
1394 const u8 *key, unsigned int keylen)
1395{
1396 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1397 struct device *jrdev = ctx->jrdev;
1398 int ret = 0;
1399
1400 if (keylen < 4)
1401 return -EINVAL;
1402
1403#ifdef DEBUG
1404 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1405 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1406#endif
1407
1408 memcpy(ctx->key, key, keylen);
1409
1410 /*
1411 * The last four bytes of the key material are used as the salt value
1412 * in the nonce. Update the AES key length.
1413 */
1414 ctx->enckeylen = keylen - 4;
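	/*
	 * e.g. (illustrative): a 20-byte rfc4106(gcm(aes)) key is a 16-byte
	 * AES-128 key followed by the 4-byte salt, which stays at
	 * ctx->key + ctx->enckeylen for later use by the job descriptor.
	 */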
1415
1416 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1417 DMA_TO_DEVICE);
1418 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1419 dev_err(jrdev, "unable to map key i/o memory\n");
1420 return -ENOMEM;
1421 }
1422
1423 ret = rfc4106_set_sh_desc(aead);
1424 if (ret) {
1425 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1426 DMA_TO_DEVICE);
1427 }
1428
1429 return ret;
1430}
1431
Tudor Ambarus5d0429a2014-10-30 18:55:07 +02001432static int rfc4543_setkey(struct crypto_aead *aead,
1433 const u8 *key, unsigned int keylen)
1434{
1435 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1436 struct device *jrdev = ctx->jrdev;
1437 int ret = 0;
1438
1439 if (keylen < 4)
1440 return -EINVAL;
1441
1442#ifdef DEBUG
1443 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1444 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1445#endif
1446
1447 memcpy(ctx->key, key, keylen);
1448
1449 /*
1450 * The last four bytes of the key material are used as the salt value
1451 * in the nonce. Update the AES key length.
1452 */
1453 ctx->enckeylen = keylen - 4;
1454
1455 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1456 DMA_TO_DEVICE);
1457 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1458 dev_err(jrdev, "unable to map key i/o memory\n");
1459 return -ENOMEM;
1460 }
1461
1462 ret = rfc4543_set_sh_desc(aead);
1463 if (ret) {
1464 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1465 DMA_TO_DEVICE);
1466 }
1467
1468 return ret;
1469}
1470
Yuan Kangacdca312011-07-15 11:21:42 +08001471static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1472 const u8 *key, unsigned int keylen)
1473{
1474 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001475 struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
1476 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
1477 const char *alg_name = crypto_tfm_alg_name(tfm);
Yuan Kangacdca312011-07-15 11:21:42 +08001478 struct device *jrdev = ctx->jrdev;
1479 int ret = 0;
Horia Geanta4464a7d2014-03-14 17:46:49 +02001480 u32 *key_jump_cmd;
Yuan Kangacdca312011-07-15 11:21:42 +08001481 u32 *desc;
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001482 u32 *nonce;
Catalin Vasile7222d1a2014-10-31 12:45:38 +02001483 u32 geniv;
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001484 u32 ctx1_iv_off = 0;
1485 const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
1486 OP_ALG_AAI_CTR_MOD128);
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001487 const bool is_rfc3686 = (ctr_mode &&
1488 (strstr(alg_name, "rfc3686") != NULL));
Yuan Kangacdca312011-07-15 11:21:42 +08001489
1490#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001491 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001492 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1493#endif
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001494 /*
	 * AES-CTR needs the IV loaded into the CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes):
1497 * CONTEXT1[255:128] = IV
1498 */
1499 if (ctr_mode)
1500 ctx1_iv_off = 16;
Yuan Kangacdca312011-07-15 11:21:42 +08001501
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001502 /*
1503 * RFC3686 specific:
1504 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1505 * | *key = {KEY, NONCE}
1506 */
1507 if (is_rfc3686) {
1508 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1509 keylen -= CTR_RFC3686_NONCE_SIZE;
1510 }
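	/*
	 * Resulting CONTEXT1 byte layout (illustrative, assuming the RFC 3686
	 * sizes CTR_RFC3686_NONCE_SIZE = 4 and CTR_RFC3686_IV_SIZE = 8):
	 *   plain CTR: [16..31] = IV                       (ctx1_iv_off = 16)
	 *   rfc3686:   [16..19] nonce, [20..27] IV, [28..31] counter
	 *                                                   (ctx1_iv_off = 20)
	 */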
1511
Yuan Kangacdca312011-07-15 11:21:42 +08001512 memcpy(ctx->key, key, keylen);
1513 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1514 DMA_TO_DEVICE);
1515 if (dma_mapping_error(jrdev, ctx->key_dma)) {
1516 dev_err(jrdev, "unable to map key i/o memory\n");
1517 return -ENOMEM;
1518 }
1519 ctx->enckeylen = keylen;
1520
1521 /* ablkcipher_encrypt shared descriptor */
1522 desc = ctx->sh_desc_enc;
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001523 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
Yuan Kangacdca312011-07-15 11:21:42 +08001524 /* Skip if already shared */
1525 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1526 JUMP_COND_SHRD);
1527
1528 /* Load class1 key only */
1529 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1530 ctx->enckeylen, CLASS_1 |
1531 KEY_DEST_CLASS_REG);
1532
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001533 /* Load nonce into CONTEXT1 reg */
1534 if (is_rfc3686) {
1535 nonce = (u32 *)(key + keylen);
1536 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1537 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1538 append_move(desc, MOVE_WAITCOMP |
1539 MOVE_SRC_OUTFIFO |
1540 MOVE_DEST_CLASS1CTX |
1541 (16 << MOVE_OFFSET_SHIFT) |
1542 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1543 }
1544
Yuan Kangacdca312011-07-15 11:21:42 +08001545 set_jump_tgt_here(desc, key_jump_cmd);
1546
Yuan Kangacdca312011-07-15 11:21:42 +08001547 /* Load iv */
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001548 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001549 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
Yuan Kangacdca312011-07-15 11:21:42 +08001550
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001551 /* Load counter into CONTEXT1 reg */
1552 if (is_rfc3686)
1553 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1554 LDST_CLASS_1_CCB |
1555 LDST_SRCDST_BYTE_CONTEXT |
1556 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1557 LDST_OFFSET_SHIFT));
1558
Yuan Kangacdca312011-07-15 11:21:42 +08001559 /* Load operation */
1560 append_operation(desc, ctx->class1_alg_type |
1561 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1562
1563 /* Perform operation */
1564 ablkcipher_append_src_dst(desc);
1565
1566 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1567 desc_bytes(desc),
1568 DMA_TO_DEVICE);
1569 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1570 dev_err(jrdev, "unable to map shared descriptor\n");
1571 return -ENOMEM;
1572 }
1573#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001574 print_hex_dump(KERN_ERR,
1575 "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001576 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1577 desc_bytes(desc), 1);
1578#endif
1579 /* ablkcipher_decrypt shared descriptor */
1580 desc = ctx->sh_desc_dec;
1581
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001582 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
Yuan Kangacdca312011-07-15 11:21:42 +08001583 /* Skip if already shared */
1584 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1585 JUMP_COND_SHRD);
1586
1587 /* Load class1 key only */
1588 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1589 ctx->enckeylen, CLASS_1 |
1590 KEY_DEST_CLASS_REG);
1591
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001592 /* Load nonce into CONTEXT1 reg */
1593 if (is_rfc3686) {
1594 nonce = (u32 *)(key + keylen);
1595 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1596 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1597 append_move(desc, MOVE_WAITCOMP |
1598 MOVE_SRC_OUTFIFO |
1599 MOVE_DEST_CLASS1CTX |
1600 (16 << MOVE_OFFSET_SHIFT) |
1601 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1602 }
1603
Yuan Kangacdca312011-07-15 11:21:42 +08001604 set_jump_tgt_here(desc, key_jump_cmd);
Yuan Kangacdca312011-07-15 11:21:42 +08001605
1606 /* load IV */
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001607 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001608 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
Yuan Kangacdca312011-07-15 11:21:42 +08001609
Catalin Vasilea5f57cf2014-10-31 12:45:36 +02001610 /* Load counter into CONTEXT1 reg */
1611 if (is_rfc3686)
1612 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
1613 LDST_CLASS_1_CCB |
1614 LDST_SRCDST_BYTE_CONTEXT |
1615 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1616 LDST_OFFSET_SHIFT));
1617
Yuan Kangacdca312011-07-15 11:21:42 +08001618 /* Choose operation */
Catalin Vasile2b22f6c2014-10-31 12:45:35 +02001619 if (ctr_mode)
1620 append_operation(desc, ctx->class1_alg_type |
1621 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
1622 else
1623 append_dec_op1(desc, ctx->class1_alg_type);
Yuan Kangacdca312011-07-15 11:21:42 +08001624
1625 /* Perform operation */
1626 ablkcipher_append_src_dst(desc);
1627
Yuan Kangacdca312011-07-15 11:21:42 +08001628 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1629 desc_bytes(desc),
1630 DMA_TO_DEVICE);
Horia Geanta71c65f72014-07-11 15:34:48 +03001631 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
Yuan Kangacdca312011-07-15 11:21:42 +08001632 dev_err(jrdev, "unable to map shared descriptor\n");
1633 return -ENOMEM;
1634 }
1635
1636#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001637 print_hex_dump(KERN_ERR,
1638 "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08001639 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1640 desc_bytes(desc), 1);
1641#endif
Catalin Vasile7222d1a2014-10-31 12:45:38 +02001642 /* ablkcipher_givencrypt shared descriptor */
1643 desc = ctx->sh_desc_givenc;
1644
1645 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1646 /* Skip if already shared */
1647 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1648 JUMP_COND_SHRD);
1649
1650 /* Load class1 key only */
1651 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1652 ctx->enckeylen, CLASS_1 |
1653 KEY_DEST_CLASS_REG);
1654
1655 /* Load Nonce into CONTEXT1 reg */
1656 if (is_rfc3686) {
1657 nonce = (u32 *)(key + keylen);
1658 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
1659 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1660 append_move(desc, MOVE_WAITCOMP |
1661 MOVE_SRC_OUTFIFO |
1662 MOVE_DEST_CLASS1CTX |
1663 (16 << MOVE_OFFSET_SHIFT) |
1664 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1665 }
1666 set_jump_tgt_here(desc, key_jump_cmd);
1667
1668 /* Generate IV */
1669 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1670 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1671 NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
1672 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1673 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1674 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1675 append_move(desc, MOVE_WAITCOMP |
1676 MOVE_SRC_INFIFO |
1677 MOVE_DEST_CLASS1CTX |
1678 (crt->ivsize << MOVE_LEN_SHIFT) |
1679 (ctx1_iv_off << MOVE_OFFSET_SHIFT));
1680 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1681
1682 /* Copy generated IV to memory */
1683 append_seq_store(desc, crt->ivsize,
1684 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1685 (ctx1_iv_off << LDST_OFFSET_SHIFT));
1686
1687 /* Load Counter into CONTEXT1 reg */
1688 if (is_rfc3686)
1689 append_load_imm_u32(desc, (u32)1, LDST_IMM |
1690 LDST_CLASS_1_CCB |
1691 LDST_SRCDST_BYTE_CONTEXT |
1692 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1693 LDST_OFFSET_SHIFT));
1694
1695 if (ctx1_iv_off)
1696 append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
1697 (1 << JUMP_OFFSET_SHIFT));
1698
1699 /* Load operation */
1700 append_operation(desc, ctx->class1_alg_type |
1701 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1702
1703 /* Perform operation */
1704 ablkcipher_append_src_dst(desc);
1705
1706 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1707 desc_bytes(desc),
1708 DMA_TO_DEVICE);
1709 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
1710 dev_err(jrdev, "unable to map shared descriptor\n");
1711 return -ENOMEM;
1712 }
1713#ifdef DEBUG
1714 print_hex_dump(KERN_ERR,
1715 "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
1716 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1717 desc_bytes(desc), 1);
1718#endif
Yuan Kangacdca312011-07-15 11:21:42 +08001719
1720 return ret;
1721}
1722
Kim Phillips8e8ec592011-03-13 16:54:26 +08001723/*
Yuan Kang1acebad32011-07-15 11:21:42 +08001724 * aead_edesc - s/w-extended aead descriptor
1725 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
Kim Phillips8e8ec592011-03-13 16:54:26 +08001727 * @src_nents: number of segments in input scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001728 * @src_chained: if source is chained
Kim Phillips8e8ec592011-03-13 16:54:26 +08001729 * @dst_nents: number of segments in output scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001730 * @dst_chained: if destination is chained
Yuan Kang1acebad32011-07-15 11:21:42 +08001731 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor (variable length; must not exceed
 *	     MAX_CAAM_DESCSIZE) followed by any referenced link tables
1736 */
Yuan Kang0e479302011-07-15 11:21:41 +08001737struct aead_edesc {
Kim Phillips8e8ec592011-03-13 16:54:26 +08001738 int assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001739 bool assoc_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001740 int src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001741 bool src_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001742 int dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001743 bool dst_chained;
Yuan Kang1acebad32011-07-15 11:21:42 +08001744 dma_addr_t iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001745 int sec4_sg_bytes;
1746 dma_addr_t sec4_sg_dma;
1747 struct sec4_sg_entry *sec4_sg;
Herbert Xuf2147b82015-06-16 13:54:23 +08001748 u32 hw_desc[];
Kim Phillips8e8ec592011-03-13 16:54:26 +08001749};
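
/*
 * An aead_edesc is carved out of a single allocation by the *_edesc_alloc()
 * helpers below; illustrative memory layout:
 *
 *	[ struct aead_edesc | hw_desc (desc_bytes) | sec4_sg link table ]
 *
 * with sec4_sg pointing just past the job descriptor area.
 */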
1750
Yuan Kangacdca312011-07-15 11:21:42 +08001751/*
1752 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
1753 * @src_nents: number of segments in input scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001754 * @src_chained: if source is chained
Yuan Kangacdca312011-07-15 11:21:42 +08001755 * @dst_nents: number of segments in output scatterlist
Yuan Kang643b39b2012-06-22 19:48:49 -05001756 * @dst_chained: if destination is chained
Yuan Kangacdca312011-07-15 11:21:42 +08001757 * @iv_dma: dma address of iv for checking continuity and link table
1758 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
Yuan Kanga299c832012-06-22 19:48:46 -05001759 * @sec4_sg_bytes: length of dma mapped sec4_sg space
1760 * @sec4_sg_dma: bus physical mapped address of h/w link table
Yuan Kangacdca312011-07-15 11:21:42 +08001761 * @hw_desc: the h/w job descriptor followed by any referenced link tables
1762 */
1763struct ablkcipher_edesc {
1764 int src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001765 bool src_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08001766 int dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05001767 bool dst_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08001768 dma_addr_t iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05001769 int sec4_sg_bytes;
1770 dma_addr_t sec4_sg_dma;
1771 struct sec4_sg_entry *sec4_sg;
Yuan Kangacdca312011-07-15 11:21:42 +08001772 u32 hw_desc[0];
1773};
1774
Yuan Kang1acebad32011-07-15 11:21:42 +08001775static void caam_unmap(struct device *dev, struct scatterlist *src,
Yuan Kang643b39b2012-06-22 19:48:49 -05001776 struct scatterlist *dst, int src_nents,
1777 bool src_chained, int dst_nents, bool dst_chained,
Yuan Kanga299c832012-06-22 19:48:46 -05001778 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1779 int sec4_sg_bytes)
Kim Phillips8e8ec592011-03-13 16:54:26 +08001780{
Yuan Kang643b39b2012-06-22 19:48:49 -05001781 if (dst != src) {
1782 dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
1783 src_chained);
1784 dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
1785 dst_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001786 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05001787 dma_unmap_sg_chained(dev, src, src_nents ? : 1,
1788 DMA_BIDIRECTIONAL, src_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001789 }
1790
Yuan Kang1acebad32011-07-15 11:21:42 +08001791 if (iv_dma)
1792 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
Yuan Kanga299c832012-06-22 19:48:46 -05001793 if (sec4_sg_bytes)
1794 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001795 DMA_TO_DEVICE);
1796}
1797
Yuan Kang1acebad32011-07-15 11:21:42 +08001798static void aead_unmap(struct device *dev,
1799 struct aead_edesc *edesc,
1800 struct aead_request *req)
1801{
Herbert Xuf2147b82015-06-16 13:54:23 +08001802 caam_unmap(dev, req->src, req->dst,
1803 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
1804 edesc->dst_chained, 0, 0,
1805 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1806}
1807
1808static void old_aead_unmap(struct device *dev,
1809 struct aead_edesc *edesc,
1810 struct aead_request *req)
1811{
Yuan Kang1acebad32011-07-15 11:21:42 +08001812 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1813 int ivsize = crypto_aead_ivsize(aead);
1814
Yuan Kang643b39b2012-06-22 19:48:49 -05001815 dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
1816 DMA_TO_DEVICE, edesc->assoc_chained);
Yuan Kang1acebad32011-07-15 11:21:42 +08001817
1818 caam_unmap(dev, req->src, req->dst,
Yuan Kang643b39b2012-06-22 19:48:49 -05001819 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
1820 edesc->dst_chained, edesc->iv_dma, ivsize,
1821 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
Yuan Kang1acebad32011-07-15 11:21:42 +08001822}
1823
Yuan Kangacdca312011-07-15 11:21:42 +08001824static void ablkcipher_unmap(struct device *dev,
1825 struct ablkcipher_edesc *edesc,
1826 struct ablkcipher_request *req)
1827{
1828 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1829 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1830
1831 caam_unmap(dev, req->src, req->dst,
Yuan Kang643b39b2012-06-22 19:48:49 -05001832 edesc->src_nents, edesc->src_chained, edesc->dst_nents,
1833 edesc->dst_chained, edesc->iv_dma, ivsize,
1834 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
Yuan Kangacdca312011-07-15 11:21:42 +08001835}
1836
Yuan Kang0e479302011-07-15 11:21:41 +08001837static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001838 void *context)
1839{
Yuan Kang0e479302011-07-15 11:21:41 +08001840 struct aead_request *req = context;
1841 struct aead_edesc *edesc;
Herbert Xuf2147b82015-06-16 13:54:23 +08001842
1843#ifdef DEBUG
1844 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1845#endif
1846
1847 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1848
1849 if (err)
1850 caam_jr_strstatus(jrdev, err);
1851
1852 aead_unmap(jrdev, edesc, req);
1853
1854 kfree(edesc);
1855
1856 aead_request_complete(req, err);
1857}
1858
1859static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1860 void *context)
1861{
1862 struct aead_request *req = context;
1863 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001864#ifdef DEBUG
Yuan Kang0e479302011-07-15 11:21:41 +08001865 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001866 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Yuan Kang1acebad32011-07-15 11:21:42 +08001867 int ivsize = crypto_aead_ivsize(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001868
1869 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1870#endif
Yuan Kang1acebad32011-07-15 11:21:42 +08001871
Yuan Kang0e479302011-07-15 11:21:41 +08001872 edesc = (struct aead_edesc *)((char *)desc -
1873 offsetof(struct aead_edesc, hw_desc));
Kim Phillips8e8ec592011-03-13 16:54:26 +08001874
Marek Vasutfa9659c2014-04-24 20:05:12 +02001875 if (err)
1876 caam_jr_strstatus(jrdev, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001877
Herbert Xuf2147b82015-06-16 13:54:23 +08001878 old_aead_unmap(jrdev, edesc, req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001879
1880#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001881 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001882 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1883 req->assoclen , 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001884 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001885 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001886 edesc->src_nents ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001887 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08001888 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1889 edesc->src_nents ? 100 : req->cryptlen +
Kim Phillips8e8ec592011-03-13 16:54:26 +08001890 ctx->authsize + 4, 1);
1891#endif
1892
1893 kfree(edesc);
1894
Yuan Kang0e479302011-07-15 11:21:41 +08001895 aead_request_complete(req, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001896}
1897
Yuan Kang0e479302011-07-15 11:21:41 +08001898static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
Kim Phillips8e8ec592011-03-13 16:54:26 +08001899 void *context)
1900{
Yuan Kang0e479302011-07-15 11:21:41 +08001901 struct aead_request *req = context;
1902 struct aead_edesc *edesc;
Herbert Xuf2147b82015-06-16 13:54:23 +08001903
1904#ifdef DEBUG
1905 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1906#endif
1907
1908 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1909
1910 if (err)
1911 caam_jr_strstatus(jrdev, err);
1912
1913 aead_unmap(jrdev, edesc, req);
1914
1915 /*
1916 * verify hw auth check passed else return -EBADMSG
1917 */
1918 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
1919 err = -EBADMSG;
1920
1921 kfree(edesc);
1922
1923 aead_request_complete(req, err);
1924}
1925
1926static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1927 void *context)
1928{
1929 struct aead_request *req = context;
1930 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08001931#ifdef DEBUG
Yuan Kang0e479302011-07-15 11:21:41 +08001932 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001933 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Yuan Kang1acebad32011-07-15 11:21:42 +08001934 int ivsize = crypto_aead_ivsize(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001935
1936 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1937#endif
Yuan Kang1acebad32011-07-15 11:21:42 +08001938
Yuan Kang0e479302011-07-15 11:21:41 +08001939 edesc = (struct aead_edesc *)((char *)desc -
1940 offsetof(struct aead_edesc, hw_desc));
Kim Phillips8e8ec592011-03-13 16:54:26 +08001941
Yuan Kang1acebad32011-07-15 11:21:42 +08001942#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001943 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kang1acebad32011-07-15 11:21:42 +08001944 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1945 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03001946 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kang1acebad32011-07-15 11:21:42 +08001947 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
Horia Geantabbf9c892013-11-28 15:11:16 +02001948 req->cryptlen - ctx->authsize, 1);
Yuan Kang1acebad32011-07-15 11:21:42 +08001949#endif
1950
Marek Vasutfa9659c2014-04-24 20:05:12 +02001951 if (err)
1952 caam_jr_strstatus(jrdev, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001953
Herbert Xuf2147b82015-06-16 13:54:23 +08001954 old_aead_unmap(jrdev, edesc, req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001955
1956 /*
1957 * verify hw auth check passed else return -EBADMSG
1958 */
1959 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
1960 err = -EBADMSG;
1961
1962#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03001963 print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001964 DUMP_PREFIX_ADDRESS, 16, 4,
Yuan Kang0e479302011-07-15 11:21:41 +08001965 ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
1966 sizeof(struct iphdr) + req->assoclen +
1967 ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
Kim Phillips8e8ec592011-03-13 16:54:26 +08001968 ctx->authsize + 36, 1);
Yuan Kanga299c832012-06-22 19:48:46 -05001969 if (!err && edesc->sec4_sg_bytes) {
Yuan Kang0e479302011-07-15 11:21:41 +08001970 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
Alex Porosanu514df282013-08-14 18:56:45 +03001971 print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08001972 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
1973 sg->length + ctx->authsize + 16, 1);
1974 }
1975#endif
Yuan Kang1acebad32011-07-15 11:21:42 +08001976
Kim Phillips8e8ec592011-03-13 16:54:26 +08001977 kfree(edesc);
1978
Yuan Kang0e479302011-07-15 11:21:41 +08001979 aead_request_complete(req, err);
Kim Phillips8e8ec592011-03-13 16:54:26 +08001980}
1981
Yuan Kangacdca312011-07-15 11:21:42 +08001982static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1983 void *context)
1984{
1985 struct ablkcipher_request *req = context;
1986 struct ablkcipher_edesc *edesc;
1987#ifdef DEBUG
1988 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1989 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1990
1991 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1992#endif
1993
1994 edesc = (struct ablkcipher_edesc *)((char *)desc -
1995 offsetof(struct ablkcipher_edesc, hw_desc));
1996
Marek Vasutfa9659c2014-04-24 20:05:12 +02001997 if (err)
1998 caam_jr_strstatus(jrdev, err);
Yuan Kangacdca312011-07-15 11:21:42 +08001999
2000#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002001 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002002 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2003 edesc->src_nents > 1 ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002004 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002005 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2006 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2007#endif
2008
2009 ablkcipher_unmap(jrdev, edesc, req);
2010 kfree(edesc);
2011
2012 ablkcipher_request_complete(req, err);
2013}
2014
2015static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
2016 void *context)
2017{
2018 struct ablkcipher_request *req = context;
2019 struct ablkcipher_edesc *edesc;
2020#ifdef DEBUG
2021 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2022 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2023
2024 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2025#endif
2026
2027 edesc = (struct ablkcipher_edesc *)((char *)desc -
2028 offsetof(struct ablkcipher_edesc, hw_desc));
Marek Vasutfa9659c2014-04-24 20:05:12 +02002029 if (err)
2030 caam_jr_strstatus(jrdev, err);
Yuan Kangacdca312011-07-15 11:21:42 +08002031
2032#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002033 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002034 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2035 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002036 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002037 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2038 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
2039#endif
2040
2041 ablkcipher_unmap(jrdev, edesc, req);
2042 kfree(edesc);
2043
2044 ablkcipher_request_complete(req, err);
2045}
2046
Kim Phillips8e8ec592011-03-13 16:54:26 +08002047/*
Yuan Kang1acebad32011-07-15 11:21:42 +08002048 * Fill in aead job descriptor
Kim Phillips8e8ec592011-03-13 16:54:26 +08002049 */
Herbert Xuf2147b82015-06-16 13:54:23 +08002050static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr,
2051 struct aead_edesc *edesc,
2052 struct aead_request *req,
2053 bool all_contig, bool encrypt)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002054{
Yuan Kang0e479302011-07-15 11:21:41 +08002055 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002056 struct caam_ctx *ctx = crypto_aead_ctx(aead);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002057 int ivsize = crypto_aead_ivsize(aead);
2058 int authsize = ctx->authsize;
Yuan Kang1acebad32011-07-15 11:21:42 +08002059 u32 *desc = edesc->hw_desc;
2060 u32 out_options = 0, in_options;
2061 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002062 int len, sec4_sg_index = 0;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002063 bool is_gcm = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002064
Yuan Kang1acebad32011-07-15 11:21:42 +08002065#ifdef DEBUG
Kim Phillips8e8ec592011-03-13 16:54:26 +08002066 debug("assoclen %d cryptlen %d authsize %d\n",
Yuan Kang0e479302011-07-15 11:21:41 +08002067 req->assoclen, req->cryptlen, authsize);
Alex Porosanu514df282013-08-14 18:56:45 +03002068 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08002069 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
2070 req->assoclen , 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002071 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kang1acebad32011-07-15 11:21:42 +08002072 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
Kim Phillips8e8ec592011-03-13 16:54:26 +08002073 edesc->src_nents ? 100 : ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002074 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kang0e479302011-07-15 11:21:41 +08002075 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
Yuan Kang1acebad32011-07-15 11:21:42 +08002076 edesc->src_nents ? 100 : req->cryptlen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002077 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
Kim Phillips8e8ec592011-03-13 16:54:26 +08002078 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
2079 desc_bytes(sh_desc), 1);
2080#endif
Yuan Kang1acebad32011-07-15 11:21:42 +08002081
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002082 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2083 OP_ALG_ALGSEL_AES) &&
2084 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2085 is_gcm = true;
2086
Yuan Kang1acebad32011-07-15 11:21:42 +08002087 len = desc_len(sh_desc);
2088 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2089
2090 if (all_contig) {
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002091 if (is_gcm)
2092 src_dma = edesc->iv_dma;
2093 else
2094 src_dma = sg_dma_address(req->assoc);
Yuan Kang1acebad32011-07-15 11:21:42 +08002095 in_options = 0;
2096 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002097 src_dma = edesc->sec4_sg_dma;
2098 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
2099 (edesc->src_nents ? : 1);
Yuan Kang1acebad32011-07-15 11:21:42 +08002100 in_options = LDST_SGF;
2101 }
Horia Geantabbf9c892013-11-28 15:11:16 +02002102
2103 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
2104 in_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002105
Yuan Kang1acebad32011-07-15 11:21:42 +08002106 if (likely(req->src == req->dst)) {
2107 if (all_contig) {
2108 dst_dma = sg_dma_address(req->src);
2109 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002110 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
Yuan Kang1acebad32011-07-15 11:21:42 +08002111 ((edesc->assoc_nents ? : 1) + 1);
2112 out_options = LDST_SGF;
2113 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002114 } else {
Kim Phillips8e8ec592011-03-13 16:54:26 +08002115 if (!edesc->dst_nents) {
Yuan Kang0e479302011-07-15 11:21:41 +08002116 dst_dma = sg_dma_address(req->dst);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002117 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002118 dst_dma = edesc->sec4_sg_dma +
2119 sec4_sg_index *
2120 sizeof(struct sec4_sg_entry);
Yuan Kang1acebad32011-07-15 11:21:42 +08002121 out_options = LDST_SGF;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002122 }
2123 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002124 if (encrypt)
Horia Geantabbf9c892013-11-28 15:11:16 +02002125 append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
2126 out_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002127 else
Yuan Kang1acebad32011-07-15 11:21:42 +08002128 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
2129 out_options);
2130}
2131
2132/*
Herbert Xuf2147b82015-06-16 13:54:23 +08002133 * Fill in aead job descriptor
2134 */
2135static void init_aead_job(struct aead_request *req,
2136 struct aead_edesc *edesc,
2137 bool all_contig, bool encrypt)
2138{
2139 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2140 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2141 int authsize = ctx->authsize;
2142 u32 *desc = edesc->hw_desc;
2143 u32 out_options, in_options;
2144 dma_addr_t dst_dma, src_dma;
2145 int len, sec4_sg_index = 0;
2146 dma_addr_t ptr;
2147 u32 *sh_desc;
2148
2149 sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
2150 ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
2151
2152 len = desc_len(sh_desc);
2153 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2154
2155 if (all_contig) {
2156 src_dma = sg_dma_address(req->src);
2157 in_options = 0;
2158 } else {
2159 src_dma = edesc->sec4_sg_dma;
2160 sec4_sg_index += edesc->src_nents;
2161 in_options = LDST_SGF;
2162 }
2163
2164 append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
2165 in_options);
2166
2167 dst_dma = src_dma;
2168 out_options = in_options;
2169
2170 if (unlikely(req->src != req->dst)) {
2171 if (!edesc->dst_nents) {
2172 dst_dma = sg_dma_address(req->dst);
2173 } else {
2174 dst_dma = edesc->sec4_sg_dma +
2175 sec4_sg_index *
2176 sizeof(struct sec4_sg_entry);
2177 out_options = LDST_SGF;
2178 }
2179 }
2180
2181 if (encrypt)
2182 append_seq_out_ptr(desc, dst_dma,
2183 req->assoclen + req->cryptlen + authsize,
2184 out_options);
2185 else
2186 append_seq_out_ptr(desc, dst_dma,
2187 req->assoclen + req->cryptlen - authsize,
2188 out_options);
2189
2190 /* REG3 = assoclen */
2191 append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
2192}
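
/*
 * Unlike old_init_aead_job() above, this path expects req->src to carry the
 * associated data immediately followed by the text, so the input sequence
 * length is simply assoclen + cryptlen and assoclen reaches the shared
 * descriptor through MATH REG3 instead of a separate assoc scatterlist.
 */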
2193
2194static void init_gcm_job(struct aead_request *req,
2195 struct aead_edesc *edesc,
2196 bool all_contig, bool encrypt)
2197{
2198 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2199 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2200 unsigned int ivsize = crypto_aead_ivsize(aead);
2201 u32 *desc = edesc->hw_desc;
2202 bool generic_gcm = (ivsize == 12);
2203 unsigned int last;
2204
2205 init_aead_job(req, edesc, all_contig, encrypt);
2206
2207 /* BUG This should not be specific to generic GCM. */
2208 last = 0;
2209 if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
2210 last = FIFOLD_TYPE_LAST1;
2211
2212 /* Read GCM IV */
2213 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
2214 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
2215 /* Append Salt */
2216 if (!generic_gcm)
2217 append_data(desc, ctx->key + ctx->enckeylen, 4);
2218 /* Append IV */
2219 append_data(desc, req->iv, ivsize);
2220 /* End of blank commands */
2221}
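
/*
 * The descriptor built above always pushes a 12-byte IV to the class 1 (AES)
 * engine: the caller's 12-byte IV for generic gcm(aes), or (illustratively,
 * for the 8-byte-IV rfc4106/rfc4543-style transforms) the 4-byte salt stored
 * after the AES key followed by the 8-byte per-request IV.
 */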
2222
2223/*
Yuan Kang1acebad32011-07-15 11:21:42 +08002224 * Fill in aead givencrypt job descriptor
2225 */
2226static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
2227 struct aead_edesc *edesc,
2228 struct aead_request *req,
2229 int contig)
2230{
2231 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2232 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2233 int ivsize = crypto_aead_ivsize(aead);
2234 int authsize = ctx->authsize;
2235 u32 *desc = edesc->hw_desc;
2236 u32 out_options = 0, in_options;
2237 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002238 int len, sec4_sg_index = 0;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002239 bool is_gcm = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002240
2241#ifdef DEBUG
Yuan Kang1acebad32011-07-15 11:21:42 +08002242 debug("assoclen %d cryptlen %d authsize %d\n",
2243 req->assoclen, req->cryptlen, authsize);
Alex Porosanu514df282013-08-14 18:56:45 +03002244 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
Yuan Kang1acebad32011-07-15 11:21:42 +08002245 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
2246 req->assoclen , 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002247 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kang1acebad32011-07-15 11:21:42 +08002248 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002249 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kang1acebad32011-07-15 11:21:42 +08002250 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2251 edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002252 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad32011-07-15 11:21:42 +08002253 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
2254 desc_bytes(sh_desc), 1);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002255#endif
2256
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002257 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2258 OP_ALG_ALGSEL_AES) &&
2259 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2260 is_gcm = true;
2261
Yuan Kang1acebad32011-07-15 11:21:42 +08002262 len = desc_len(sh_desc);
2263 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2264
2265 if (contig & GIV_SRC_CONTIG) {
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002266 if (is_gcm)
2267 src_dma = edesc->iv_dma;
2268 else
2269 src_dma = sg_dma_address(req->assoc);
Yuan Kang1acebad32011-07-15 11:21:42 +08002270 in_options = 0;
2271 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002272 src_dma = edesc->sec4_sg_dma;
2273 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
Yuan Kang1acebad32011-07-15 11:21:42 +08002274 in_options = LDST_SGF;
2275 }
Horia Geantabbf9c892013-11-28 15:11:16 +02002276 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
2277 in_options);
Yuan Kang1acebad32011-07-15 11:21:42 +08002278
2279 if (contig & GIV_DST_CONTIG) {
2280 dst_dma = edesc->iv_dma;
2281 } else {
2282 if (likely(req->src == req->dst)) {
Yuan Kanga299c832012-06-22 19:48:46 -05002283 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002284 (edesc->assoc_nents +
2285 (is_gcm ? 1 + edesc->src_nents : 0));
Yuan Kang1acebad32011-07-15 11:21:42 +08002286 out_options = LDST_SGF;
2287 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002288 dst_dma = edesc->sec4_sg_dma +
2289 sec4_sg_index *
2290 sizeof(struct sec4_sg_entry);
Yuan Kang1acebad32011-07-15 11:21:42 +08002291 out_options = LDST_SGF;
2292 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002293 }
2294
Horia Geantabbf9c892013-11-28 15:11:16 +02002295 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
2296 out_options);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002297}
2298
2299/*
Yuan Kangacdca312011-07-15 11:21:42 +08002300 * Fill in ablkcipher job descriptor
2301 */
2302static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2303 struct ablkcipher_edesc *edesc,
2304 struct ablkcipher_request *req,
2305 bool iv_contig)
2306{
2307 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2308 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2309 u32 *desc = edesc->hw_desc;
2310 u32 out_options = 0, in_options;
2311 dma_addr_t dst_dma, src_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002312 int len, sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08002313
2314#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002315 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002316 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2317 ivsize, 1);
Alex Porosanu514df282013-08-14 18:56:45 +03002318 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08002319 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2320 edesc->src_nents ? 100 : req->nbytes, 1);
2321#endif
2322
2323 len = desc_len(sh_desc);
2324 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2325
2326 if (iv_contig) {
2327 src_dma = edesc->iv_dma;
2328 in_options = 0;
2329 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002330 src_dma = edesc->sec4_sg_dma;
Cristian Stoica35b82e52015-01-21 11:53:30 +02002331 sec4_sg_index += edesc->src_nents + 1;
Yuan Kangacdca312011-07-15 11:21:42 +08002332 in_options = LDST_SGF;
2333 }
2334 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2335
2336 if (likely(req->src == req->dst)) {
2337 if (!edesc->src_nents && iv_contig) {
2338 dst_dma = sg_dma_address(req->src);
2339 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002340 dst_dma = edesc->sec4_sg_dma +
2341 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002342 out_options = LDST_SGF;
2343 }
2344 } else {
2345 if (!edesc->dst_nents) {
2346 dst_dma = sg_dma_address(req->dst);
2347 } else {
Yuan Kanga299c832012-06-22 19:48:46 -05002348 dst_dma = edesc->sec4_sg_dma +
2349 sec4_sg_index * sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08002350 out_options = LDST_SGF;
2351 }
2352 }
2353 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2354}
2355
2356/*
Catalin Vasile7222d1a2014-10-31 12:45:38 +02002357 * Fill in ablkcipher givencrypt job descriptor
2358 */
2359static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
2360 struct ablkcipher_edesc *edesc,
2361 struct ablkcipher_request *req,
2362 bool iv_contig)
2363{
2364 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2365 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2366 u32 *desc = edesc->hw_desc;
2367 u32 out_options, in_options;
2368 dma_addr_t dst_dma, src_dma;
2369 int len, sec4_sg_index = 0;
2370
2371#ifdef DEBUG
2372 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
2373 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2374 ivsize, 1);
2375 print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
2376 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2377 edesc->src_nents ? 100 : req->nbytes, 1);
2378#endif
2379
2380 len = desc_len(sh_desc);
2381 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2382
2383 if (!edesc->src_nents) {
2384 src_dma = sg_dma_address(req->src);
2385 in_options = 0;
2386 } else {
2387 src_dma = edesc->sec4_sg_dma;
2388 sec4_sg_index += edesc->src_nents;
2389 in_options = LDST_SGF;
2390 }
2391 append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
2392
2393 if (iv_contig) {
2394 dst_dma = edesc->iv_dma;
2395 out_options = 0;
2396 } else {
2397 dst_dma = edesc->sec4_sg_dma +
2398 sec4_sg_index * sizeof(struct sec4_sg_entry);
2399 out_options = LDST_SGF;
2400 }
2401 append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
2402}
2403
2404/*
Yuan Kang1acebad32011-07-15 11:21:42 +08002405 * allocate and map the aead extended descriptor
Kim Phillips8e8ec592011-03-13 16:54:26 +08002406 */
Herbert Xuf2147b82015-06-16 13:54:23 +08002407static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req,
2408 int desc_bytes,
2409 bool *all_contig_ptr,
2410 bool encrypt)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002411{
Yuan Kang0e479302011-07-15 11:21:41 +08002412 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002413 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2414 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad32011-07-15 11:21:42 +08002415 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2416 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2417 int assoc_nents, src_nents, dst_nents = 0;
Yuan Kang0e479302011-07-15 11:21:41 +08002418 struct aead_edesc *edesc;
Yuan Kang1acebad32011-07-15 11:21:42 +08002419 dma_addr_t iv_dma = 0;
2420 int sgc;
2421 bool all_contig = true;
Yuan Kang643b39b2012-06-22 19:48:49 -05002422 bool assoc_chained = false, src_chained = false, dst_chained = false;
Yuan Kang1acebad32011-07-15 11:21:42 +08002423 int ivsize = crypto_aead_ivsize(aead);
Yuan Kanga299c832012-06-22 19:48:46 -05002424 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
Horia Geantabbf9c892013-11-28 15:11:16 +02002425 unsigned int authsize = ctx->authsize;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002426 bool is_gcm = false;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002427
Yuan Kang643b39b2012-06-22 19:48:49 -05002428 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002429
Horia Geantabbf9c892013-11-28 15:11:16 +02002430 if (unlikely(req->dst != req->src)) {
2431 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
2432 dst_nents = sg_count(req->dst,
2433 req->cryptlen +
2434 (encrypt ? authsize : (-authsize)),
2435 &dst_chained);
2436 } else {
2437 src_nents = sg_count(req->src,
2438 req->cryptlen +
2439 (encrypt ? authsize : 0),
2440 &src_chained);
2441 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002442
Yuan Kang643b39b2012-06-22 19:48:49 -05002443 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
Horia Geanta286233e2013-05-10 15:08:39 +03002444 DMA_TO_DEVICE, assoc_chained);
Yuan Kang1acebad32011-07-15 11:21:42 +08002445 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05002446 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2447 DMA_BIDIRECTIONAL, src_chained);
Yuan Kang1acebad32011-07-15 11:21:42 +08002448 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05002449 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2450 DMA_TO_DEVICE, src_chained);
2451 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2452 DMA_FROM_DEVICE, dst_chained);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002453 }
2454
Yuan Kang1acebad32011-07-15 11:21:42 +08002455 iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002456 if (dma_mapping_error(jrdev, iv_dma)) {
2457 dev_err(jrdev, "unable to map IV\n");
2458 return ERR_PTR(-ENOMEM);
2459 }
2460
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002461 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2462 OP_ALG_ALGSEL_AES) &&
2463 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2464 is_gcm = true;
2465
2466 /*
2467 * Check if data are contiguous.
2468 * GCM expected input sequence: IV, AAD, text
	 * All others - expected input sequence: AAD, IV, text
2470 */
2471 if (is_gcm)
2472 all_contig = (!assoc_nents &&
2473 iv_dma + ivsize == sg_dma_address(req->assoc) &&
2474 !src_nents && sg_dma_address(req->assoc) +
2475 req->assoclen == sg_dma_address(req->src));
2476 else
2477 all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
2478 req->assoclen == iv_dma && !src_nents &&
2479 iv_dma + ivsize == sg_dma_address(req->src));
2480 if (!all_contig) {
Yuan Kang1acebad32011-07-15 11:21:42 +08002481 assoc_nents = assoc_nents ? : 1;
2482 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05002483 sec4_sg_len = assoc_nents + 1 + src_nents;
Yuan Kang1acebad32011-07-15 11:21:42 +08002484 }
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002485
Yuan Kanga299c832012-06-22 19:48:46 -05002486 sec4_sg_len += dst_nents;
Yuan Kang1acebad32011-07-15 11:21:42 +08002487
Yuan Kanga299c832012-06-22 19:48:46 -05002488 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002489
2490 /* allocate space for base edesc and hw desc commands, link tables */
Yuan Kang0e479302011-07-15 11:21:41 +08002491 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05002492 sec4_sg_bytes, GFP_DMA | flags);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002493 if (!edesc) {
2494 dev_err(jrdev, "could not allocate extended descriptor\n");
2495 return ERR_PTR(-ENOMEM);
2496 }
2497
2498 edesc->assoc_nents = assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002499 edesc->assoc_chained = assoc_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002500 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002501 edesc->src_chained = src_chained;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002502 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002503 edesc->dst_chained = dst_chained;
Yuan Kang1acebad32011-07-15 11:21:42 +08002504 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002505 edesc->sec4_sg_bytes = sec4_sg_bytes;
2506 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2507 desc_bytes;
Yuan Kang1acebad32011-07-15 11:21:42 +08002508 *all_contig_ptr = all_contig;
2509
Yuan Kanga299c832012-06-22 19:48:46 -05002510 sec4_sg_index = 0;
Yuan Kang1acebad32011-07-15 11:21:42 +08002511 if (!all_contig) {
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002512 if (!is_gcm) {
Herbert Xu70c3c8a2015-06-08 16:38:24 +08002513 sg_to_sec4_sg_len(req->assoc, req->assoclen,
2514 edesc->sec4_sg + sec4_sg_index);
Cristian Stoica35b82e52015-01-21 11:53:30 +02002515 sec4_sg_index += assoc_nents;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002516 }
2517
Yuan Kanga299c832012-06-22 19:48:46 -05002518 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad32011-07-15 11:21:42 +08002519 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05002520 sec4_sg_index += 1;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002521
2522 if (is_gcm) {
Herbert Xu70c3c8a2015-06-08 16:38:24 +08002523 sg_to_sec4_sg_len(req->assoc, req->assoclen,
2524 edesc->sec4_sg + sec4_sg_index);
Cristian Stoica35b82e52015-01-21 11:53:30 +02002525 sec4_sg_index += assoc_nents;
Tudor Ambarus3ef8d942014-10-23 16:11:23 +03002526 }
2527
Yuan Kanga299c832012-06-22 19:48:46 -05002528 sg_to_sec4_sg_last(req->src,
Cristian Stoica35b82e52015-01-21 11:53:30 +02002529 src_nents,
Yuan Kanga299c832012-06-22 19:48:46 -05002530 edesc->sec4_sg +
2531 sec4_sg_index, 0);
Cristian Stoica35b82e52015-01-21 11:53:30 +02002532 sec4_sg_index += src_nents;
Yuan Kang1acebad32011-07-15 11:21:42 +08002533 }
2534 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05002535 sg_to_sec4_sg_last(req->dst, dst_nents,
2536 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad32011-07-15 11:21:42 +08002537 }
Ruchika Gupta1da2be32014-06-23 19:50:26 +05302538 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2539 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002540 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2541 dev_err(jrdev, "unable to map S/G table\n");
2542 return ERR_PTR(-ENOMEM);
2543 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002544
2545 return edesc;
2546}
2547
Herbert Xuf2147b82015-06-16 13:54:23 +08002548/*
2549 * allocate and map the aead extended descriptor
2550 */
2551static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
2552 int desc_bytes, bool *all_contig_ptr,
2553 bool encrypt)
2554{
2555 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2556 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2557 struct device *jrdev = ctx->jrdev;
2558 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2559 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2560 int src_nents, dst_nents = 0;
2561 struct aead_edesc *edesc;
2562 int sgc;
2563 bool all_contig = true;
2564 bool src_chained = false, dst_chained = false;
2565 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2566 unsigned int authsize = ctx->authsize;
2567
2568 if (unlikely(req->dst != req->src)) {
2569 src_nents = sg_count(req->src, req->assoclen + req->cryptlen,
2570 &src_chained);
2571 dst_nents = sg_count(req->dst,
2572 req->assoclen + req->cryptlen +
2573 (encrypt ? authsize : (-authsize)),
2574 &dst_chained);
2575 } else {
2576 src_nents = sg_count(req->src,
2577 req->assoclen + req->cryptlen +
2578 (encrypt ? authsize : 0),
2579 &src_chained);
2580 }
2581
2582 /* Check if data are contiguous. */
2583 all_contig = !src_nents;
2584 if (!all_contig) {
2585 src_nents = src_nents ? : 1;
2586 sec4_sg_len = src_nents;
2587 }
2588
2589 sec4_sg_len += dst_nents;
2590
2591 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2592
2593 /* allocate space for base edesc and hw desc commands, link tables */
2594 edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
2595 sec4_sg_bytes, GFP_DMA | flags);
2596 if (!edesc) {
2597 dev_err(jrdev, "could not allocate extended descriptor\n");
2598 return ERR_PTR(-ENOMEM);
2599 }
2600
2601 if (likely(req->src == req->dst)) {
2602 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2603 DMA_BIDIRECTIONAL, src_chained);
2604 if (unlikely(!sgc)) {
2605 dev_err(jrdev, "unable to map source\n");
2606 kfree(edesc);
2607 return ERR_PTR(-ENOMEM);
2608 }
2609 } else {
2610 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2611 DMA_TO_DEVICE, src_chained);
2612 if (unlikely(!sgc)) {
2613 dev_err(jrdev, "unable to map source\n");
2614 kfree(edesc);
2615 return ERR_PTR(-ENOMEM);
2616 }
2617
2618 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2619 DMA_FROM_DEVICE, dst_chained);
2620 if (unlikely(!sgc)) {
2621 dev_err(jrdev, "unable to map destination\n");
2622 dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1,
2623 DMA_TO_DEVICE, src_chained);
2624 kfree(edesc);
2625 return ERR_PTR(-ENOMEM);
2626 }
2627 }
2628
2629 edesc->src_nents = src_nents;
2630 edesc->src_chained = src_chained;
2631 edesc->dst_nents = dst_nents;
2632 edesc->dst_chained = dst_chained;
2633 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2634 desc_bytes;
2635 *all_contig_ptr = all_contig;
2636
2637 sec4_sg_index = 0;
2638 if (!all_contig) {
Herbert Xu7793bda2015-06-18 14:25:56 +08002639 sg_to_sec4_sg_last(req->src, src_nents,
Herbert Xuf2147b82015-06-16 13:54:23 +08002640 edesc->sec4_sg + sec4_sg_index, 0);
2641 sec4_sg_index += src_nents;
2642 }
2643 if (dst_nents) {
2644 sg_to_sec4_sg_last(req->dst, dst_nents,
2645 edesc->sec4_sg + sec4_sg_index, 0);
2646 }
2647
2648 if (!sec4_sg_bytes)
2649 return edesc;
2650
2651 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2652 sec4_sg_bytes, DMA_TO_DEVICE);
2653 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2654 dev_err(jrdev, "unable to map S/G table\n");
2655 aead_unmap(jrdev, edesc, req);
2656 kfree(edesc);
2657 return ERR_PTR(-ENOMEM);
2658 }
2659
2660 edesc->sec4_sg_bytes = sec4_sg_bytes;
2661
2662 return edesc;
2663}
2664
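/*
 * gcm_encrypt()/gcm_decrypt() service the GCM algorithms exposed through the
 * new AEAD interface (see driver_aeads below), while the old_aead_* entry
 * points keep the legacy givencrypt-capable templates in driver_algs working.
 */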
2665static int gcm_encrypt(struct aead_request *req)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002666{
Yuan Kang0e479302011-07-15 11:21:41 +08002667 struct aead_edesc *edesc;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002668 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002669 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2670 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad32011-07-15 11:21:42 +08002671 bool all_contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002672 u32 *desc;
Yuan Kang1acebad32011-07-15 11:21:42 +08002673 int ret = 0;
2674
Kim Phillips8e8ec592011-03-13 16:54:26 +08002675 /* allocate extended descriptor */
Herbert Xuf2147b82015-06-16 13:54:23 +08002676 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
Kim Phillips8e8ec592011-03-13 16:54:26 +08002677 if (IS_ERR(edesc))
2678 return PTR_ERR(edesc);
2679
Yuan Kang1acebad32011-07-15 11:21:42 +08002680 /* Create and submit job descriptor */
Herbert Xuf2147b82015-06-16 13:54:23 +08002681 init_gcm_job(req, edesc, all_contig, true);
Yuan Kang1acebad32011-07-15 11:21:42 +08002682#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002683 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad32011-07-15 11:21:42 +08002684 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2685 desc_bytes(edesc->hw_desc), 1);
2686#endif
2687
Kim Phillips8e8ec592011-03-13 16:54:26 +08002688 desc = edesc->hw_desc;
Yuan Kang1acebad32011-07-15 11:21:42 +08002689 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2690 if (!ret) {
2691 ret = -EINPROGRESS;
2692 } else {
2693 aead_unmap(jrdev, edesc, req);
2694 kfree(edesc);
2695 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08002696
Yuan Kang1acebad32011-07-15 11:21:42 +08002697 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08002698}
2699
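/*
 * RFC4106 (IPsec ESP GCM): the 8-byte explicit IV is carried inside the
 * associated data in this interface, so requests with less than 8 bytes of
 * assoclen cannot be valid and are rejected up front.
 */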
Herbert Xu46218752015-07-09 07:17:33 +08002700static int ipsec_gcm_encrypt(struct aead_request *req)
2701{
2702 if (req->assoclen < 8)
2703 return -EINVAL;
2704
2705 return gcm_encrypt(req);
2706}
2707
Herbert Xuf2147b82015-06-16 13:54:23 +08002708static int old_aead_encrypt(struct aead_request *req)
Kim Phillips8e8ec592011-03-13 16:54:26 +08002709{
Yuan Kang1acebad32011-07-15 11:21:42 +08002710 struct aead_edesc *edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08002711 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Yuan Kang0e479302011-07-15 11:21:41 +08002712 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2713 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad32011-07-15 11:21:42 +08002714 bool all_contig;
Yuan Kang0e479302011-07-15 11:21:41 +08002715 u32 *desc;
Yuan Kang1acebad32011-07-15 11:21:42 +08002716 int ret = 0;
Yuan Kang0e479302011-07-15 11:21:41 +08002717
2718 /* allocate extended descriptor */
Herbert Xuf2147b82015-06-16 13:54:23 +08002719 edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
2720 CAAM_CMD_SZ, &all_contig, true);
Yuan Kang0e479302011-07-15 11:21:41 +08002721 if (IS_ERR(edesc))
2722 return PTR_ERR(edesc);
2723
Herbert Xuf2147b82015-06-16 13:54:23 +08002724 /* Create and submit job descriptor */
2725 old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
2726 all_contig, true);
Yuan Kang1acebad32011-07-15 11:21:42 +08002727#ifdef DEBUG
Herbert Xuf2147b82015-06-16 13:54:23 +08002728 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2729 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2730 desc_bytes(edesc->hw_desc), 1);
Yuan Kang1acebad32011-07-15 11:21:42 +08002731#endif
2732
Herbert Xuf2147b82015-06-16 13:54:23 +08002733 desc = edesc->hw_desc;
2734 ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
2735 if (!ret) {
2736 ret = -EINPROGRESS;
2737 } else {
2738 old_aead_unmap(jrdev, edesc, req);
2739 kfree(edesc);
2740 }
2741
2742 return ret;
2743}
2744
2745static int gcm_decrypt(struct aead_request *req)
2746{
2747 struct aead_edesc *edesc;
2748 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2749 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2750 struct device *jrdev = ctx->jrdev;
2751 bool all_contig;
2752 u32 *desc;
2753 int ret = 0;
2754
2755 /* allocate extended descriptor */
2756 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
2757 if (IS_ERR(edesc))
2758 return PTR_ERR(edesc);
2759
Yuan Kang1acebad32011-07-15 11:21:42 +08002760	/* Create and submit job descriptor */
Herbert Xuf2147b82015-06-16 13:54:23 +08002761 init_gcm_job(req, edesc, all_contig, false);
Yuan Kang1acebad32011-07-15 11:21:42 +08002762#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03002763 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad32011-07-15 11:21:42 +08002764 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2765 desc_bytes(edesc->hw_desc), 1);
2766#endif
2767
Yuan Kang0e479302011-07-15 11:21:41 +08002768 desc = edesc->hw_desc;
Yuan Kang1acebad32011-07-15 11:21:42 +08002769 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2770 if (!ret) {
2771 ret = -EINPROGRESS;
2772 } else {
2773 aead_unmap(jrdev, edesc, req);
2774 kfree(edesc);
2775 }
Yuan Kang0e479302011-07-15 11:21:41 +08002776
Yuan Kang1acebad32011-07-15 11:21:42 +08002777 return ret;
2778}
Yuan Kang0e479302011-07-15 11:21:41 +08002779
Herbert Xu46218752015-07-09 07:17:33 +08002780static int ipsec_gcm_decrypt(struct aead_request *req)
2781{
2782 if (req->assoclen < 8)
2783 return -EINVAL;
2784
2785 return gcm_decrypt(req);
2786}
2787
Herbert Xuf2147b82015-06-16 13:54:23 +08002788static int old_aead_decrypt(struct aead_request *req)
2789{
2790 struct aead_edesc *edesc;
2791 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2792 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2793 struct device *jrdev = ctx->jrdev;
2794 bool all_contig;
2795 u32 *desc;
2796 int ret = 0;
2797
2798 /* allocate extended descriptor */
2799 edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
2800 CAAM_CMD_SZ, &all_contig, false);
2801 if (IS_ERR(edesc))
2802 return PTR_ERR(edesc);
2803
2804#ifdef DEBUG
2805 print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
2806 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
2807 req->cryptlen, 1);
2808#endif
2809
2810	/* Create and submit job descriptor */
2811 old_init_aead_job(ctx->sh_desc_dec,
2812 ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
2813#ifdef DEBUG
2814 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2815 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2816 desc_bytes(edesc->hw_desc), 1);
2817#endif
2818
2819 desc = edesc->hw_desc;
2820 ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req);
2821 if (!ret) {
2822 ret = -EINPROGRESS;
2823 } else {
2824 old_aead_unmap(jrdev, edesc, req);
2825 kfree(edesc);
2826 }
2827
2828 return ret;
2829}
2830
Yuan Kang1acebad32011-07-15 11:21:42 +08002831/*
2832 * allocate and map the aead extended descriptor for aead givencrypt
2833 */
2834static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
2835 *greq, int desc_bytes,
2836 u32 *contig_ptr)
2837{
2838 struct aead_request *req = &greq->areq;
2839 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2840 struct caam_ctx *ctx = crypto_aead_ctx(aead);
2841 struct device *jrdev = ctx->jrdev;
2842 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2843 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2844 int assoc_nents, src_nents, dst_nents = 0;
2845 struct aead_edesc *edesc;
2846 dma_addr_t iv_dma = 0;
2847 int sgc;
2848 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
2849 int ivsize = crypto_aead_ivsize(aead);
Yuan Kang643b39b2012-06-22 19:48:49 -05002850 bool assoc_chained = false, src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05002851 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002852 bool is_gcm = false;
Yuan Kang0e479302011-07-15 11:21:41 +08002853
Yuan Kang643b39b2012-06-22 19:48:49 -05002854 assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
2855 src_nents = sg_count(req->src, req->cryptlen, &src_chained);
Yuan Kang0e479302011-07-15 11:21:41 +08002856
Yuan Kang1acebad32011-07-15 11:21:42 +08002857 if (unlikely(req->dst != req->src))
Horia Geantabbf9c892013-11-28 15:11:16 +02002858 dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
2859 &dst_chained);
Yuan Kang1acebad32011-07-15 11:21:42 +08002860
Yuan Kang643b39b2012-06-22 19:48:49 -05002861 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
Horia Geanta286233e2013-05-10 15:08:39 +03002862 DMA_TO_DEVICE, assoc_chained);
Yuan Kang1acebad32011-07-15 11:21:42 +08002863 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05002864 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2865 DMA_BIDIRECTIONAL, src_chained);
Yuan Kang1acebad32011-07-15 11:21:42 +08002866 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05002867 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
2868 DMA_TO_DEVICE, src_chained);
2869 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
2870 DMA_FROM_DEVICE, dst_chained);
Yuan Kang1acebad32011-07-15 11:21:42 +08002871 }
2872
Yuan Kang1acebad32011-07-15 11:21:42 +08002873 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002874 if (dma_mapping_error(jrdev, iv_dma)) {
2875 dev_err(jrdev, "unable to map IV\n");
2876 return ERR_PTR(-ENOMEM);
2877 }
2878
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002879 if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
2880 OP_ALG_ALGSEL_AES) &&
2881 ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
2882 is_gcm = true;
2883
2884 /*
2885 * Check if data are contiguous.
2886 * GCM expected input sequence: IV, AAD, text
2887	 * All other modes - expected input sequence: AAD, IV, text
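	 * e.g. the GCM layout only counts as contiguous when the mapped IV ends
	 * exactly where the (single-segment) assoc data begins and the assoc
	 * data ends exactly where the (single-segment) src begins.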
2888 */
2889
2890 if (is_gcm) {
2891 if (assoc_nents || iv_dma + ivsize !=
2892 sg_dma_address(req->assoc) || src_nents ||
2893 sg_dma_address(req->assoc) + req->assoclen !=
2894 sg_dma_address(req->src))
2895 contig &= ~GIV_SRC_CONTIG;
2896 } else {
2897 if (assoc_nents ||
2898 sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
2899 src_nents || iv_dma + ivsize != sg_dma_address(req->src))
2900 contig &= ~GIV_SRC_CONTIG;
2901 }
2902
Yuan Kang1acebad32011-07-15 11:21:42 +08002903 if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
2904 contig &= ~GIV_DST_CONTIG;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002905
Yuan Kang1acebad32011-07-15 11:21:42 +08002906 if (!(contig & GIV_SRC_CONTIG)) {
2907 assoc_nents = assoc_nents ? : 1;
2908 src_nents = src_nents ? : 1;
Yuan Kanga299c832012-06-22 19:48:46 -05002909 sec4_sg_len += assoc_nents + 1 + src_nents;
Tudor Ambarus19167bf2014-10-24 18:13:37 +03002910 if (req->src == req->dst &&
2911 (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
Yuan Kang1acebad32011-07-15 11:21:42 +08002912 contig &= ~GIV_DST_CONTIG;
2913 }
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002914
2915 /*
2916 * Add new sg entries for GCM output sequence.
2917 * Expected output sequence: IV, encrypted text.
2918 */
2919 if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
2920 sec4_sg_len += 1 + src_nents;
2921
2922 if (unlikely(req->src != req->dst)) {
2923 dst_nents = dst_nents ? : 1;
2924 sec4_sg_len += 1 + dst_nents;
2925 }
Yuan Kang1acebad32011-07-15 11:21:42 +08002926
Yuan Kanga299c832012-06-22 19:48:46 -05002927 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
Yuan Kang1acebad32011-07-15 11:21:42 +08002928
2929 /* allocate space for base edesc and hw desc commands, link tables */
2930 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05002931 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kang1acebad32011-07-15 11:21:42 +08002932 if (!edesc) {
2933 dev_err(jrdev, "could not allocate extended descriptor\n");
2934 return ERR_PTR(-ENOMEM);
2935 }
2936
2937 edesc->assoc_nents = assoc_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002938 edesc->assoc_chained = assoc_chained;
Yuan Kang1acebad32011-07-15 11:21:42 +08002939 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002940 edesc->src_chained = src_chained;
Yuan Kang1acebad32011-07-15 11:21:42 +08002941 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05002942 edesc->dst_chained = dst_chained;
Yuan Kang1acebad32011-07-15 11:21:42 +08002943 edesc->iv_dma = iv_dma;
Yuan Kanga299c832012-06-22 19:48:46 -05002944 edesc->sec4_sg_bytes = sec4_sg_bytes;
2945 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2946 desc_bytes;
Yuan Kang1acebad32011-07-15 11:21:42 +08002947 *contig_ptr = contig;
2948
Yuan Kanga299c832012-06-22 19:48:46 -05002949 sec4_sg_index = 0;
Yuan Kang1acebad32011-07-15 11:21:42 +08002950 if (!(contig & GIV_SRC_CONTIG)) {
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002951 if (!is_gcm) {
Herbert Xu70c3c8a2015-06-08 16:38:24 +08002952 sg_to_sec4_sg_len(req->assoc, req->assoclen,
2953 edesc->sec4_sg + sec4_sg_index);
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002954 sec4_sg_index += assoc_nents;
2955 }
2956
Yuan Kanga299c832012-06-22 19:48:46 -05002957 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad32011-07-15 11:21:42 +08002958 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05002959 sec4_sg_index += 1;
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002960
2961 if (is_gcm) {
Herbert Xu70c3c8a2015-06-08 16:38:24 +08002962 sg_to_sec4_sg_len(req->assoc, req->assoclen,
2963 edesc->sec4_sg + sec4_sg_index);
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002964 sec4_sg_index += assoc_nents;
2965 }
2966
Yuan Kanga299c832012-06-22 19:48:46 -05002967 sg_to_sec4_sg_last(req->src, src_nents,
2968 edesc->sec4_sg +
2969 sec4_sg_index, 0);
2970 sec4_sg_index += src_nents;
Yuan Kang1acebad32011-07-15 11:21:42 +08002971 }
Tudor Ambarusbac68f22014-10-23 16:14:03 +03002972
2973 if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
2974 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2975 iv_dma, ivsize, 0);
2976 sec4_sg_index += 1;
2977 sg_to_sec4_sg_last(req->src, src_nents,
2978 edesc->sec4_sg + sec4_sg_index, 0);
2979 }
2980
Yuan Kang1acebad32011-07-15 11:21:42 +08002981 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
Yuan Kanga299c832012-06-22 19:48:46 -05002982 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
Yuan Kang1acebad32011-07-15 11:21:42 +08002983 iv_dma, ivsize, 0);
Yuan Kanga299c832012-06-22 19:48:46 -05002984 sec4_sg_index += 1;
2985 sg_to_sec4_sg_last(req->dst, dst_nents,
2986 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kang1acebad32011-07-15 11:21:42 +08002987 }
Ruchika Gupta1da2be32014-06-23 19:50:26 +05302988 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2989 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03002990 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2991 dev_err(jrdev, "unable to map S/G table\n");
2992 return ERR_PTR(-ENOMEM);
2993 }
Yuan Kang1acebad32011-07-15 11:21:42 +08002994
2995 return edesc;
Yuan Kang0e479302011-07-15 11:21:41 +08002996}
2997
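/*
 * Legacy givencrypt entry point: the IV generated into greq->giv is DMA
 * mapped by aead_giv_edesc_alloc() and the job is built from the givencrypt
 * shared descriptor; completion is then handled exactly like a plain encrypt.
 */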
Herbert Xuf2147b82015-06-16 13:54:23 +08002998static int old_aead_givencrypt(struct aead_givcrypt_request *areq)
Yuan Kang0e479302011-07-15 11:21:41 +08002999{
3000 struct aead_request *req = &areq->areq;
3001 struct aead_edesc *edesc;
3002 struct crypto_aead *aead = crypto_aead_reqtfm(req);
Kim Phillips8e8ec592011-03-13 16:54:26 +08003003 struct caam_ctx *ctx = crypto_aead_ctx(aead);
3004 struct device *jrdev = ctx->jrdev;
Yuan Kang1acebad32011-07-15 11:21:42 +08003005 u32 contig;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003006 u32 *desc;
Yuan Kang1acebad32011-07-15 11:21:42 +08003007 int ret = 0;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003008
Kim Phillips8e8ec592011-03-13 16:54:26 +08003009 /* allocate extended descriptor */
Yuan Kang1acebad32011-07-15 11:21:42 +08003010 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
3011 CAAM_CMD_SZ, &contig);
3012
Kim Phillips8e8ec592011-03-13 16:54:26 +08003013 if (IS_ERR(edesc))
3014 return PTR_ERR(edesc);
3015
Yuan Kang1acebad32011-07-15 11:21:42 +08003016#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03003017 print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
Yuan Kang1acebad32011-07-15 11:21:42 +08003018 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
3019 req->cryptlen, 1);
3020#endif
3021
3022	/* Create and submit job descriptor */
3023 init_aead_giv_job(ctx->sh_desc_givenc,
3024 ctx->sh_desc_givenc_dma, edesc, req, contig);
3025#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03003026 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
Yuan Kang1acebad32011-07-15 11:21:42 +08003027 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3028 desc_bytes(edesc->hw_desc), 1);
3029#endif
3030
Kim Phillips8e8ec592011-03-13 16:54:26 +08003031 desc = edesc->hw_desc;
Herbert Xuf2147b82015-06-16 13:54:23 +08003032 ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
Yuan Kang1acebad32011-07-15 11:21:42 +08003033 if (!ret) {
3034 ret = -EINPROGRESS;
3035 } else {
Herbert Xuf2147b82015-06-16 13:54:23 +08003036 old_aead_unmap(jrdev, edesc, req);
Yuan Kang1acebad32011-07-15 11:21:42 +08003037 kfree(edesc);
3038 }
Kim Phillips8e8ec592011-03-13 16:54:26 +08003039
Yuan Kang1acebad32011-07-15 11:21:42 +08003040 return ret;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003041}
3042
Horia Geantaae4a8252014-03-14 17:46:52 +02003043static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
3044{
Herbert Xuf2147b82015-06-16 13:54:23 +08003045 return old_aead_encrypt(&areq->areq);
Horia Geantaae4a8252014-03-14 17:46:52 +02003046}
3047
Yuan Kangacdca312011-07-15 11:21:42 +08003048/*
3049	 * allocate and map the ablkcipher extended descriptor for an ablkcipher request
3050 */
3051static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
3052 *req, int desc_bytes,
3053 bool *iv_contig_out)
3054{
3055 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3056 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3057 struct device *jrdev = ctx->jrdev;
3058 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
3059 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
3060 GFP_KERNEL : GFP_ATOMIC;
Yuan Kanga299c832012-06-22 19:48:46 -05003061 int src_nents, dst_nents = 0, sec4_sg_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08003062 struct ablkcipher_edesc *edesc;
3063 dma_addr_t iv_dma = 0;
3064 bool iv_contig = false;
3065 int sgc;
3066 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
Yuan Kang643b39b2012-06-22 19:48:49 -05003067 bool src_chained = false, dst_chained = false;
Yuan Kanga299c832012-06-22 19:48:46 -05003068 int sec4_sg_index;
Yuan Kangacdca312011-07-15 11:21:42 +08003069
Yuan Kang643b39b2012-06-22 19:48:49 -05003070 src_nents = sg_count(req->src, req->nbytes, &src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08003071
Yuan Kang643b39b2012-06-22 19:48:49 -05003072 if (req->dst != req->src)
3073 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08003074
3075 if (likely(req->src == req->dst)) {
Yuan Kang643b39b2012-06-22 19:48:49 -05003076 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
3077 DMA_BIDIRECTIONAL, src_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08003078 } else {
Yuan Kang643b39b2012-06-22 19:48:49 -05003079 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
3080 DMA_TO_DEVICE, src_chained);
3081 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
3082 DMA_FROM_DEVICE, dst_chained);
Yuan Kangacdca312011-07-15 11:21:42 +08003083 }
3084
Horia Geantace572082014-07-11 15:34:49 +03003085 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
3086 if (dma_mapping_error(jrdev, iv_dma)) {
3087 dev_err(jrdev, "unable to map IV\n");
3088 return ERR_PTR(-ENOMEM);
3089 }
3090
Yuan Kangacdca312011-07-15 11:21:42 +08003091 /*
3092 * Check if iv can be contiguous with source and destination.
3093 * If so, include it. If not, create scatterlist.
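	 * In practice the IV is only merged when the source is a single DMA
	 * segment that starts exactly where the mapped IV ends.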
3094 */
Yuan Kangacdca312011-07-15 11:21:42 +08003095 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
3096 iv_contig = true;
3097 else
3098 src_nents = src_nents ? : 1;
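	/* one extra link-table entry carries the IV when it is not contiguous */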
Yuan Kanga299c832012-06-22 19:48:46 -05003099 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
3100 sizeof(struct sec4_sg_entry);
Yuan Kangacdca312011-07-15 11:21:42 +08003101
3102 /* allocate space for base edesc and hw desc commands, link tables */
3103 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
Yuan Kanga299c832012-06-22 19:48:46 -05003104 sec4_sg_bytes, GFP_DMA | flags);
Yuan Kangacdca312011-07-15 11:21:42 +08003105 if (!edesc) {
3106 dev_err(jrdev, "could not allocate extended descriptor\n");
3107 return ERR_PTR(-ENOMEM);
3108 }
3109
3110 edesc->src_nents = src_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05003111 edesc->src_chained = src_chained;
Yuan Kangacdca312011-07-15 11:21:42 +08003112 edesc->dst_nents = dst_nents;
Yuan Kang643b39b2012-06-22 19:48:49 -05003113 edesc->dst_chained = dst_chained;
Yuan Kanga299c832012-06-22 19:48:46 -05003114 edesc->sec4_sg_bytes = sec4_sg_bytes;
3115 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
3116 desc_bytes;
Yuan Kangacdca312011-07-15 11:21:42 +08003117
Yuan Kanga299c832012-06-22 19:48:46 -05003118 sec4_sg_index = 0;
Yuan Kangacdca312011-07-15 11:21:42 +08003119 if (!iv_contig) {
Yuan Kanga299c832012-06-22 19:48:46 -05003120 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
3121 sg_to_sec4_sg_last(req->src, src_nents,
3122 edesc->sec4_sg + 1, 0);
3123 sec4_sg_index += 1 + src_nents;
Yuan Kangacdca312011-07-15 11:21:42 +08003124 }
3125
Yuan Kang643b39b2012-06-22 19:48:49 -05003126 if (dst_nents) {
Yuan Kanga299c832012-06-22 19:48:46 -05003127 sg_to_sec4_sg_last(req->dst, dst_nents,
3128 edesc->sec4_sg + sec4_sg_index, 0);
Yuan Kangacdca312011-07-15 11:21:42 +08003129 }
3130
Yuan Kanga299c832012-06-22 19:48:46 -05003131 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
3132 sec4_sg_bytes, DMA_TO_DEVICE);
Horia Geantace572082014-07-11 15:34:49 +03003133 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
3134 dev_err(jrdev, "unable to map S/G table\n");
3135 return ERR_PTR(-ENOMEM);
3136 }
3137
Yuan Kangacdca312011-07-15 11:21:42 +08003138 edesc->iv_dma = iv_dma;
3139
3140#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03003141 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
Yuan Kanga299c832012-06-22 19:48:46 -05003142 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
3143 sec4_sg_bytes, 1);
Yuan Kangacdca312011-07-15 11:21:42 +08003144#endif
3145
3146 *iv_contig_out = iv_contig;
3147 return edesc;
3148}
3149
3150static int ablkcipher_encrypt(struct ablkcipher_request *req)
3151{
3152 struct ablkcipher_edesc *edesc;
3153 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3154 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3155 struct device *jrdev = ctx->jrdev;
3156 bool iv_contig;
3157 u32 *desc;
3158 int ret = 0;
3159
3160 /* allocate extended descriptor */
3161 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
3162 CAAM_CMD_SZ, &iv_contig);
3163 if (IS_ERR(edesc))
3164 return PTR_ERR(edesc);
3165
3166	/* Create and submit job descriptor */
3167 init_ablkcipher_job(ctx->sh_desc_enc,
3168 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
3169#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03003170 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08003171 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3172 desc_bytes(edesc->hw_desc), 1);
3173#endif
3174 desc = edesc->hw_desc;
3175 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
3176
3177 if (!ret) {
3178 ret = -EINPROGRESS;
3179 } else {
3180 ablkcipher_unmap(jrdev, edesc, req);
3181 kfree(edesc);
3182 }
3183
3184 return ret;
3185}
3186
3187static int ablkcipher_decrypt(struct ablkcipher_request *req)
3188{
3189 struct ablkcipher_edesc *edesc;
3190 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3191 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3192 struct device *jrdev = ctx->jrdev;
3193 bool iv_contig;
3194 u32 *desc;
3195 int ret = 0;
3196
3197 /* allocate extended descriptor */
3198 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
3199 CAAM_CMD_SZ, &iv_contig);
3200 if (IS_ERR(edesc))
3201 return PTR_ERR(edesc);
3202
3203	/* Create and submit job descriptor */
3204 init_ablkcipher_job(ctx->sh_desc_dec,
3205 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
3206 desc = edesc->hw_desc;
3207#ifdef DEBUG
Alex Porosanu514df282013-08-14 18:56:45 +03003208 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
Yuan Kangacdca312011-07-15 11:21:42 +08003209 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3210 desc_bytes(edesc->hw_desc), 1);
3211#endif
3212
3213 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
3214 if (!ret) {
3215 ret = -EINPROGRESS;
3216 } else {
3217 ablkcipher_unmap(jrdev, edesc, req);
3218 kfree(edesc);
3219 }
3220
3221 return ret;
3222}
3223
Catalin Vasile7222d1a2014-10-31 12:45:38 +02003224/*
3225 * allocate and map the ablkcipher extended descriptor
3226 * for ablkcipher givencrypt
3227 */
3228static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
3229 struct skcipher_givcrypt_request *greq,
3230 int desc_bytes,
3231 bool *iv_contig_out)
3232{
3233 struct ablkcipher_request *req = &greq->creq;
3234 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3235 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3236 struct device *jrdev = ctx->jrdev;
3237 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
3238 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
3239 GFP_KERNEL : GFP_ATOMIC;
3240 int src_nents, dst_nents = 0, sec4_sg_bytes;
3241 struct ablkcipher_edesc *edesc;
3242 dma_addr_t iv_dma = 0;
3243 bool iv_contig = false;
3244 int sgc;
3245 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
3246 bool src_chained = false, dst_chained = false;
3247 int sec4_sg_index;
3248
3249 src_nents = sg_count(req->src, req->nbytes, &src_chained);
3250
3251 if (unlikely(req->dst != req->src))
3252 dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
3253
3254 if (likely(req->src == req->dst)) {
3255 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
3256 DMA_BIDIRECTIONAL, src_chained);
3257 } else {
3258 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
3259 DMA_TO_DEVICE, src_chained);
3260 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
3261 DMA_FROM_DEVICE, dst_chained);
3262 }
3263
3264 /*
3265	 * Check if the generated IV can be contiguous with the destination.
3266 * If so, include it. If not, create scatterlist.
3267 */
3268 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
3269 if (dma_mapping_error(jrdev, iv_dma)) {
3270 dev_err(jrdev, "unable to map IV\n");
3271 return ERR_PTR(-ENOMEM);
3272 }
3273
3274 if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
3275 iv_contig = true;
3276 else
3277 dst_nents = dst_nents ? : 1;
3278 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
3279 sizeof(struct sec4_sg_entry);
3280
3281 /* allocate space for base edesc and hw desc commands, link tables */
3282 edesc = kmalloc(sizeof(*edesc) + desc_bytes +
3283 sec4_sg_bytes, GFP_DMA | flags);
3284 if (!edesc) {
3285 dev_err(jrdev, "could not allocate extended descriptor\n");
3286 return ERR_PTR(-ENOMEM);
3287 }
3288
3289 edesc->src_nents = src_nents;
3290 edesc->src_chained = src_chained;
3291 edesc->dst_nents = dst_nents;
3292 edesc->dst_chained = dst_chained;
3293 edesc->sec4_sg_bytes = sec4_sg_bytes;
3294 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
3295 desc_bytes;
3296
3297 sec4_sg_index = 0;
3298 if (src_nents) {
3299 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
3300 sec4_sg_index += src_nents;
3301 }
3302
3303 if (!iv_contig) {
3304 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
3305 iv_dma, ivsize, 0);
3306 sec4_sg_index += 1;
3307 sg_to_sec4_sg_last(req->dst, dst_nents,
3308 edesc->sec4_sg + sec4_sg_index, 0);
3309 }
3310
3311 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
3312 sec4_sg_bytes, DMA_TO_DEVICE);
3313 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
3314 dev_err(jrdev, "unable to map S/G table\n");
3315 return ERR_PTR(-ENOMEM);
3316 }
3317 edesc->iv_dma = iv_dma;
3318
3319#ifdef DEBUG
3320 print_hex_dump(KERN_ERR,
3321 "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
3322 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
3323 sec4_sg_bytes, 1);
3324#endif
3325
3326 *iv_contig_out = iv_contig;
3327 return edesc;
3328}
3329
3330static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
3331{
3332 struct ablkcipher_request *req = &creq->creq;
3333 struct ablkcipher_edesc *edesc;
3334 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
3335 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3336 struct device *jrdev = ctx->jrdev;
3337 bool iv_contig;
3338 u32 *desc;
3339 int ret = 0;
3340
3341 /* allocate extended descriptor */
3342 edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
3343 CAAM_CMD_SZ, &iv_contig);
3344 if (IS_ERR(edesc))
3345 return PTR_ERR(edesc);
3346
3347	/* Create and submit job descriptor */
3348 init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
3349 edesc, req, iv_contig);
3350#ifdef DEBUG
3351 print_hex_dump(KERN_ERR,
3352 "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
3353 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
3354 desc_bytes(edesc->hw_desc), 1);
3355#endif
3356 desc = edesc->hw_desc;
3357 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
3358
3359 if (!ret) {
3360 ret = -EINPROGRESS;
3361 } else {
3362 ablkcipher_unmap(jrdev, edesc, req);
3363 kfree(edesc);
3364 }
3365
3366 return ret;
3367}
3368
Yuan Kang885e9e22011-07-15 11:21:41 +08003369#define template_aead template_u.aead
Yuan Kangacdca312011-07-15 11:21:42 +08003370#define template_ablkcipher template_u.ablkcipher
Kim Phillips8e8ec592011-03-13 16:54:26 +08003371struct caam_alg_template {
3372 char name[CRYPTO_MAX_ALG_NAME];
3373 char driver_name[CRYPTO_MAX_ALG_NAME];
3374 unsigned int blocksize;
Yuan Kang885e9e22011-07-15 11:21:41 +08003375 u32 type;
3376 union {
3377 struct ablkcipher_alg ablkcipher;
Herbert Xuae13ed442015-05-21 15:11:03 +08003378 struct old_aead_alg aead;
Yuan Kang885e9e22011-07-15 11:21:41 +08003379 } template_u;
Kim Phillips8e8ec592011-03-13 16:54:26 +08003380 u32 class1_alg_type;
3381 u32 class2_alg_type;
3382 u32 alg_op;
3383};
3384
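/*
 * Legacy crypto_alg templates (old AEAD, ablkcipher and givcipher interfaces).
 * class1_alg_type selects the class 1 (cipher) CHA operation, class2_alg_type
 * the class 2 (authentication) operation, and alg_op is the operation used
 * when deriving the split (HMAC) key.
 */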
3385static struct caam_alg_template driver_algs[] = {
Horia Geanta246bbed2013-03-20 16:31:58 +02003386 /* single-pass ipsec_esp descriptor */
Kim Phillips8e8ec592011-03-13 16:54:26 +08003387 {
Horia Geantaae4a8252014-03-14 17:46:52 +02003388 .name = "authenc(hmac(md5),ecb(cipher_null))",
3389 .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
3390 .blocksize = NULL_BLOCK_SIZE,
3391 .type = CRYPTO_ALG_TYPE_AEAD,
3392 .template_aead = {
3393 .setkey = aead_setkey,
3394 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003395 .encrypt = old_aead_encrypt,
3396 .decrypt = old_aead_decrypt,
Horia Geantaae4a8252014-03-14 17:46:52 +02003397 .givencrypt = aead_null_givencrypt,
3398 .geniv = "<built-in>",
3399 .ivsize = NULL_IV_SIZE,
3400 .maxauthsize = MD5_DIGEST_SIZE,
3401 },
3402 .class1_alg_type = 0,
3403 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3404 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3405 },
3406 {
3407 .name = "authenc(hmac(sha1),ecb(cipher_null))",
3408 .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
3409 .blocksize = NULL_BLOCK_SIZE,
3410 .type = CRYPTO_ALG_TYPE_AEAD,
3411 .template_aead = {
3412 .setkey = aead_setkey,
3413 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003414 .encrypt = old_aead_encrypt,
3415 .decrypt = old_aead_decrypt,
Horia Geantaae4a8252014-03-14 17:46:52 +02003416 .givencrypt = aead_null_givencrypt,
3417 .geniv = "<built-in>",
3418 .ivsize = NULL_IV_SIZE,
3419 .maxauthsize = SHA1_DIGEST_SIZE,
3420 },
3421 .class1_alg_type = 0,
3422 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3423 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3424 },
3425 {
3426 .name = "authenc(hmac(sha224),ecb(cipher_null))",
3427 .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
3428 .blocksize = NULL_BLOCK_SIZE,
3429 .type = CRYPTO_ALG_TYPE_AEAD,
3430 .template_aead = {
3431 .setkey = aead_setkey,
3432 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003433 .encrypt = old_aead_encrypt,
3434 .decrypt = old_aead_decrypt,
Horia Geantaae4a8252014-03-14 17:46:52 +02003435 .givencrypt = aead_null_givencrypt,
3436 .geniv = "<built-in>",
3437 .ivsize = NULL_IV_SIZE,
3438 .maxauthsize = SHA224_DIGEST_SIZE,
3439 },
3440 .class1_alg_type = 0,
3441 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3442 OP_ALG_AAI_HMAC_PRECOMP,
3443 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3444 },
3445 {
3446 .name = "authenc(hmac(sha256),ecb(cipher_null))",
3447 .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
3448 .blocksize = NULL_BLOCK_SIZE,
3449 .type = CRYPTO_ALG_TYPE_AEAD,
3450 .template_aead = {
3451 .setkey = aead_setkey,
3452 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003453 .encrypt = old_aead_encrypt,
3454 .decrypt = old_aead_decrypt,
Horia Geantaae4a8252014-03-14 17:46:52 +02003455 .givencrypt = aead_null_givencrypt,
3456 .geniv = "<built-in>",
3457 .ivsize = NULL_IV_SIZE,
3458 .maxauthsize = SHA256_DIGEST_SIZE,
3459 },
3460 .class1_alg_type = 0,
3461 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3462 OP_ALG_AAI_HMAC_PRECOMP,
3463 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3464 },
3465 {
3466 .name = "authenc(hmac(sha384),ecb(cipher_null))",
3467 .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
3468 .blocksize = NULL_BLOCK_SIZE,
3469 .type = CRYPTO_ALG_TYPE_AEAD,
3470 .template_aead = {
3471 .setkey = aead_setkey,
3472 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003473 .encrypt = old_aead_encrypt,
3474 .decrypt = old_aead_decrypt,
Horia Geantaae4a8252014-03-14 17:46:52 +02003475 .givencrypt = aead_null_givencrypt,
3476 .geniv = "<built-in>",
3477 .ivsize = NULL_IV_SIZE,
3478 .maxauthsize = SHA384_DIGEST_SIZE,
3479 },
3480 .class1_alg_type = 0,
3481 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3482 OP_ALG_AAI_HMAC_PRECOMP,
3483 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3484 },
3485 {
3486 .name = "authenc(hmac(sha512),ecb(cipher_null))",
3487 .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
3488 .blocksize = NULL_BLOCK_SIZE,
3489 .type = CRYPTO_ALG_TYPE_AEAD,
3490 .template_aead = {
3491 .setkey = aead_setkey,
3492 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003493 .encrypt = old_aead_encrypt,
3494 .decrypt = old_aead_decrypt,
Horia Geantaae4a8252014-03-14 17:46:52 +02003495 .givencrypt = aead_null_givencrypt,
3496 .geniv = "<built-in>",
3497 .ivsize = NULL_IV_SIZE,
3498 .maxauthsize = SHA512_DIGEST_SIZE,
3499 },
3500 .class1_alg_type = 0,
3501 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3502 OP_ALG_AAI_HMAC_PRECOMP,
3503 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3504 },
3505 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003506 .name = "authenc(hmac(md5),cbc(aes))",
3507 .driver_name = "authenc-hmac-md5-cbc-aes-caam",
3508 .blocksize = AES_BLOCK_SIZE,
3509 .type = CRYPTO_ALG_TYPE_AEAD,
3510 .template_aead = {
3511 .setkey = aead_setkey,
3512 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003513 .encrypt = old_aead_encrypt,
3514 .decrypt = old_aead_decrypt,
3515 .givencrypt = old_aead_givencrypt,
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003516 .geniv = "<built-in>",
3517 .ivsize = AES_BLOCK_SIZE,
3518 .maxauthsize = MD5_DIGEST_SIZE,
3519 },
3520 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3521 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3522 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3523 },
3524 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003525 .name = "authenc(hmac(sha1),cbc(aes))",
3526 .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
3527 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003528 .type = CRYPTO_ALG_TYPE_AEAD,
3529 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003530 .setkey = aead_setkey,
3531 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003532 .encrypt = old_aead_encrypt,
3533 .decrypt = old_aead_decrypt,
3534 .givencrypt = old_aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003535 .geniv = "<built-in>",
3536 .ivsize = AES_BLOCK_SIZE,
3537 .maxauthsize = SHA1_DIGEST_SIZE,
3538 },
3539 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3540 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3541 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3542 },
3543 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003544 .name = "authenc(hmac(sha224),cbc(aes))",
3545 .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
3546 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303547 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003548 .template_aead = {
3549 .setkey = aead_setkey,
3550 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003551 .encrypt = old_aead_encrypt,
3552 .decrypt = old_aead_decrypt,
3553 .givencrypt = old_aead_givencrypt,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003554 .geniv = "<built-in>",
3555 .ivsize = AES_BLOCK_SIZE,
3556 .maxauthsize = SHA224_DIGEST_SIZE,
3557 },
3558 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3559 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3560 OP_ALG_AAI_HMAC_PRECOMP,
3561 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3562 },
3563 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003564 .name = "authenc(hmac(sha256),cbc(aes))",
3565 .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
3566 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003567 .type = CRYPTO_ALG_TYPE_AEAD,
3568 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003569 .setkey = aead_setkey,
3570 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003571 .encrypt = old_aead_encrypt,
3572 .decrypt = old_aead_decrypt,
3573 .givencrypt = old_aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003574 .geniv = "<built-in>",
3575 .ivsize = AES_BLOCK_SIZE,
3576 .maxauthsize = SHA256_DIGEST_SIZE,
3577 },
3578 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3579 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3580 OP_ALG_AAI_HMAC_PRECOMP,
3581 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3582 },
3583 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003584 .name = "authenc(hmac(sha384),cbc(aes))",
3585 .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
3586 .blocksize = AES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303587 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003588 .template_aead = {
3589 .setkey = aead_setkey,
3590 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003591 .encrypt = old_aead_encrypt,
3592 .decrypt = old_aead_decrypt,
3593 .givencrypt = old_aead_givencrypt,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003594 .geniv = "<built-in>",
3595 .ivsize = AES_BLOCK_SIZE,
3596 .maxauthsize = SHA384_DIGEST_SIZE,
3597 },
3598 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3599 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3600 OP_ALG_AAI_HMAC_PRECOMP,
3601 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3602 },
3603
3604 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003605 .name = "authenc(hmac(sha512),cbc(aes))",
3606 .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
3607 .blocksize = AES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003608 .type = CRYPTO_ALG_TYPE_AEAD,
3609 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003610 .setkey = aead_setkey,
3611 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003612 .encrypt = old_aead_encrypt,
3613 .decrypt = old_aead_decrypt,
3614 .givencrypt = old_aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003615 .geniv = "<built-in>",
3616 .ivsize = AES_BLOCK_SIZE,
3617 .maxauthsize = SHA512_DIGEST_SIZE,
3618 },
3619 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3620 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3621 OP_ALG_AAI_HMAC_PRECOMP,
3622 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3623 },
3624 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003625 .name = "authenc(hmac(md5),cbc(des3_ede))",
3626 .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
3627 .blocksize = DES3_EDE_BLOCK_SIZE,
3628 .type = CRYPTO_ALG_TYPE_AEAD,
3629 .template_aead = {
3630 .setkey = aead_setkey,
3631 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003632 .encrypt = old_aead_encrypt,
3633 .decrypt = old_aead_decrypt,
3634 .givencrypt = old_aead_givencrypt,
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003635 .geniv = "<built-in>",
3636 .ivsize = DES3_EDE_BLOCK_SIZE,
3637 .maxauthsize = MD5_DIGEST_SIZE,
3638 },
3639 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3640 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3641 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3642 },
3643 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003644 .name = "authenc(hmac(sha1),cbc(des3_ede))",
3645 .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
3646 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003647 .type = CRYPTO_ALG_TYPE_AEAD,
3648 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003649 .setkey = aead_setkey,
3650 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003651 .encrypt = old_aead_encrypt,
3652 .decrypt = old_aead_decrypt,
3653 .givencrypt = old_aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003654 .geniv = "<built-in>",
3655 .ivsize = DES3_EDE_BLOCK_SIZE,
3656 .maxauthsize = SHA1_DIGEST_SIZE,
3657 },
3658 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3659 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3660 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3661 },
3662 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003663 .name = "authenc(hmac(sha224),cbc(des3_ede))",
3664 .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
3665 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303666 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003667 .template_aead = {
3668 .setkey = aead_setkey,
3669 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003670 .encrypt = old_aead_encrypt,
3671 .decrypt = old_aead_decrypt,
3672 .givencrypt = old_aead_givencrypt,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003673 .geniv = "<built-in>",
3674 .ivsize = DES3_EDE_BLOCK_SIZE,
3675 .maxauthsize = SHA224_DIGEST_SIZE,
3676 },
3677 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3678 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3679 OP_ALG_AAI_HMAC_PRECOMP,
3680 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3681 },
3682 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003683 .name = "authenc(hmac(sha256),cbc(des3_ede))",
3684 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
3685 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003686 .type = CRYPTO_ALG_TYPE_AEAD,
3687 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003688 .setkey = aead_setkey,
3689 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003690 .encrypt = old_aead_encrypt,
3691 .decrypt = old_aead_decrypt,
3692 .givencrypt = old_aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003693 .geniv = "<built-in>",
3694 .ivsize = DES3_EDE_BLOCK_SIZE,
3695 .maxauthsize = SHA256_DIGEST_SIZE,
3696 },
3697 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3698 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3699 OP_ALG_AAI_HMAC_PRECOMP,
3700 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3701 },
3702 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003703 .name = "authenc(hmac(sha384),cbc(des3_ede))",
3704 .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
3705 .blocksize = DES3_EDE_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303706 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003707 .template_aead = {
3708 .setkey = aead_setkey,
3709 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003710 .encrypt = old_aead_encrypt,
3711 .decrypt = old_aead_decrypt,
3712 .givencrypt = old_aead_givencrypt,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003713 .geniv = "<built-in>",
3714 .ivsize = DES3_EDE_BLOCK_SIZE,
3715 .maxauthsize = SHA384_DIGEST_SIZE,
3716 },
3717 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3718 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3719 OP_ALG_AAI_HMAC_PRECOMP,
3720 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3721 },
3722 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003723 .name = "authenc(hmac(sha512),cbc(des3_ede))",
3724 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
3725 .blocksize = DES3_EDE_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003726 .type = CRYPTO_ALG_TYPE_AEAD,
3727 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003728 .setkey = aead_setkey,
3729 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003730 .encrypt = old_aead_encrypt,
3731 .decrypt = old_aead_decrypt,
3732 .givencrypt = old_aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003733 .geniv = "<built-in>",
3734 .ivsize = DES3_EDE_BLOCK_SIZE,
3735 .maxauthsize = SHA512_DIGEST_SIZE,
3736 },
3737 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3738 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3739 OP_ALG_AAI_HMAC_PRECOMP,
3740 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3741 },
3742 {
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003743 .name = "authenc(hmac(md5),cbc(des))",
3744 .driver_name = "authenc-hmac-md5-cbc-des-caam",
3745 .blocksize = DES_BLOCK_SIZE,
3746 .type = CRYPTO_ALG_TYPE_AEAD,
3747 .template_aead = {
3748 .setkey = aead_setkey,
3749 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003750 .encrypt = old_aead_encrypt,
3751 .decrypt = old_aead_decrypt,
3752 .givencrypt = old_aead_givencrypt,
Kim Phillips8b4d43a2011-11-21 16:13:27 +08003753 .geniv = "<built-in>",
3754 .ivsize = DES_BLOCK_SIZE,
3755 .maxauthsize = MD5_DIGEST_SIZE,
3756 },
3757 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3758 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3759 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3760 },
3761 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003762 .name = "authenc(hmac(sha1),cbc(des))",
3763 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
3764 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003765 .type = CRYPTO_ALG_TYPE_AEAD,
3766 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003767 .setkey = aead_setkey,
3768 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003769 .encrypt = old_aead_encrypt,
3770 .decrypt = old_aead_decrypt,
3771 .givencrypt = old_aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003772 .geniv = "<built-in>",
3773 .ivsize = DES_BLOCK_SIZE,
3774 .maxauthsize = SHA1_DIGEST_SIZE,
3775 },
3776 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3777 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3778 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3779 },
3780 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003781 .name = "authenc(hmac(sha224),cbc(des))",
3782 .driver_name = "authenc-hmac-sha224-cbc-des-caam",
3783 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303784 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003785 .template_aead = {
3786 .setkey = aead_setkey,
3787 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003788 .encrypt = old_aead_encrypt,
3789 .decrypt = old_aead_decrypt,
3790 .givencrypt = old_aead_givencrypt,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003791 .geniv = "<built-in>",
3792 .ivsize = DES_BLOCK_SIZE,
3793 .maxauthsize = SHA224_DIGEST_SIZE,
3794 },
3795 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3796 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3797 OP_ALG_AAI_HMAC_PRECOMP,
3798 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3799 },
3800 {
Kim Phillips8e8ec592011-03-13 16:54:26 +08003801 .name = "authenc(hmac(sha256),cbc(des))",
3802 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
3803 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003804 .type = CRYPTO_ALG_TYPE_AEAD,
3805 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003806 .setkey = aead_setkey,
3807 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003808 .encrypt = old_aead_encrypt,
3809 .decrypt = old_aead_decrypt,
3810 .givencrypt = old_aead_givencrypt,
Kim Phillips8e8ec592011-03-13 16:54:26 +08003811 .geniv = "<built-in>",
3812 .ivsize = DES_BLOCK_SIZE,
3813 .maxauthsize = SHA256_DIGEST_SIZE,
3814 },
3815 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3816 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3817 OP_ALG_AAI_HMAC_PRECOMP,
3818 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3819 },
Kim Phillips4427b1b2011-05-14 22:08:17 -05003820 {
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003821 .name = "authenc(hmac(sha384),cbc(des))",
3822 .driver_name = "authenc-hmac-sha384-cbc-des-caam",
3823 .blocksize = DES_BLOCK_SIZE,
Vakul Gargcb7d5662013-03-12 14:09:24 +05303824 .type = CRYPTO_ALG_TYPE_AEAD,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003825 .template_aead = {
3826 .setkey = aead_setkey,
3827 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003828 .encrypt = old_aead_encrypt,
3829 .decrypt = old_aead_decrypt,
3830 .givencrypt = old_aead_givencrypt,
Hemant Agrawale863f9cc2012-01-09 18:26:44 -06003831 .geniv = "<built-in>",
3832 .ivsize = DES_BLOCK_SIZE,
3833 .maxauthsize = SHA384_DIGEST_SIZE,
3834 },
3835 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3836 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3837 OP_ALG_AAI_HMAC_PRECOMP,
3838 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3839 },
3840 {
Kim Phillips4427b1b2011-05-14 22:08:17 -05003841 .name = "authenc(hmac(sha512),cbc(des))",
3842 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
3843 .blocksize = DES_BLOCK_SIZE,
Yuan Kang885e9e22011-07-15 11:21:41 +08003844 .type = CRYPTO_ALG_TYPE_AEAD,
3845 .template_aead = {
Yuan Kang0e479302011-07-15 11:21:41 +08003846 .setkey = aead_setkey,
3847 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003848 .encrypt = old_aead_encrypt,
3849 .decrypt = old_aead_decrypt,
3850 .givencrypt = old_aead_givencrypt,
Kim Phillips4427b1b2011-05-14 22:08:17 -05003851 .geniv = "<built-in>",
3852 .ivsize = DES_BLOCK_SIZE,
3853 .maxauthsize = SHA512_DIGEST_SIZE,
3854 },
3855 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3856 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3857 OP_ALG_AAI_HMAC_PRECOMP,
3858 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3859 },
Tudor Ambarusbac68f22014-10-23 16:14:03 +03003860 {
Catalin Vasiledaebc462014-10-31 12:45:37 +02003861 .name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
3862 .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
3863 .blocksize = 1,
3864 .type = CRYPTO_ALG_TYPE_AEAD,
3865 .template_aead = {
3866 .setkey = aead_setkey,
3867 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003868 .encrypt = old_aead_encrypt,
3869 .decrypt = old_aead_decrypt,
3870 .givencrypt = old_aead_givencrypt,
Catalin Vasiledaebc462014-10-31 12:45:37 +02003871 .geniv = "<built-in>",
3872 .ivsize = CTR_RFC3686_IV_SIZE,
3873 .maxauthsize = MD5_DIGEST_SIZE,
3874 },
3875 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3876 .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
3877 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3878 },
3879 {
3880 .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
3881 .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
3882 .blocksize = 1,
3883 .type = CRYPTO_ALG_TYPE_AEAD,
3884 .template_aead = {
3885 .setkey = aead_setkey,
3886 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003887 .encrypt = old_aead_encrypt,
3888 .decrypt = old_aead_decrypt,
3889 .givencrypt = old_aead_givencrypt,
Catalin Vasiledaebc462014-10-31 12:45:37 +02003890 .geniv = "<built-in>",
3891 .ivsize = CTR_RFC3686_IV_SIZE,
3892 .maxauthsize = SHA1_DIGEST_SIZE,
3893 },
3894 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3895 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
3896 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3897 },
3898 {
3899 .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
3900 .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
3901 .blocksize = 1,
3902 .type = CRYPTO_ALG_TYPE_AEAD,
3903 .template_aead = {
3904 .setkey = aead_setkey,
3905 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003906 .encrypt = old_aead_encrypt,
3907 .decrypt = old_aead_decrypt,
3908 .givencrypt = old_aead_givencrypt,
Catalin Vasiledaebc462014-10-31 12:45:37 +02003909 .geniv = "<built-in>",
3910 .ivsize = CTR_RFC3686_IV_SIZE,
3911 .maxauthsize = SHA224_DIGEST_SIZE,
3912 },
3913 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3914 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3915 OP_ALG_AAI_HMAC_PRECOMP,
3916 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3917 },
3918 {
3919 .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
3920 .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
3921 .blocksize = 1,
3922 .type = CRYPTO_ALG_TYPE_AEAD,
3923 .template_aead = {
3924 .setkey = aead_setkey,
3925 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003926 .encrypt = old_aead_encrypt,
3927 .decrypt = old_aead_decrypt,
3928 .givencrypt = old_aead_givencrypt,
Catalin Vasiledaebc462014-10-31 12:45:37 +02003929 .geniv = "<built-in>",
3930 .ivsize = CTR_RFC3686_IV_SIZE,
3931 .maxauthsize = SHA256_DIGEST_SIZE,
3932 },
3933 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3934 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3935 OP_ALG_AAI_HMAC_PRECOMP,
3936 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3937 },
3938 {
3939 .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
3940 .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
3941 .blocksize = 1,
3942 .type = CRYPTO_ALG_TYPE_AEAD,
3943 .template_aead = {
3944 .setkey = aead_setkey,
3945 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003946 .encrypt = old_aead_encrypt,
3947 .decrypt = old_aead_decrypt,
3948 .givencrypt = old_aead_givencrypt,
Catalin Vasiledaebc462014-10-31 12:45:37 +02003949 .geniv = "<built-in>",
3950 .ivsize = CTR_RFC3686_IV_SIZE,
3951 .maxauthsize = SHA384_DIGEST_SIZE,
3952 },
3953 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3954 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3955 OP_ALG_AAI_HMAC_PRECOMP,
3956 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3957 },
3958 {
3959 .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
3960 .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
3961 .blocksize = 1,
3962 .type = CRYPTO_ALG_TYPE_AEAD,
3963 .template_aead = {
3964 .setkey = aead_setkey,
3965 .setauthsize = aead_setauthsize,
Herbert Xuf2147b82015-06-16 13:54:23 +08003966 .encrypt = old_aead_encrypt,
3967 .decrypt = old_aead_decrypt,
3968 .givencrypt = old_aead_givencrypt,
Catalin Vasiledaebc462014-10-31 12:45:37 +02003969 .geniv = "<built-in>",
3970 .ivsize = CTR_RFC3686_IV_SIZE,
3971 .maxauthsize = SHA512_DIGEST_SIZE,
3972 },
3973 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3974 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3975 OP_ALG_AAI_HMAC_PRECOMP,
3976 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3977 },
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	}
};
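
/*
 * End of the legacy driver_algs[] template table.  caam_alg_alloc() below
 * turns each entry into a full struct crypto_alg, and caam_init_common()
 * copies the per-template class1/class2 algorithm selectors and alg_op
 * word into the transform context when a tfm is instantiated.
 */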

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};
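
/*
 * New-interface AEAD algorithms carry their CAAM selectors in ->caam and
 * remember in ->registered whether crypto_register_aead() succeeded, so
 * that module exit only unregisters entries that actually made it in.
 */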

static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = gcm_encrypt,
			.decrypt = gcm_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
};
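
/*
 * These GCM-based entries are found by cra_name through the generic
 * crypto API.  Purely for illustration (not part of this driver), a
 * consumer would request one roughly like:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_aead_setkey(tfm, key, keylen);
 *		crypto_aead_setauthsize(tfm, 16);
 *		...
 *		crypto_free_aead(tfm);
 *	}
 *
 * Whether this implementation or another backs the handle is decided by
 * cra_priority (CAAM_CRA_PRIORITY here).
 */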

struct caam_crypto_alg {
	struct crypto_alg crypto_alg;
	struct list_head entry;
	struct caam_alg_entry caam;
};

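/*
 * Setup shared by both the legacy crypto_alg and the new aead_alg init
 * paths: take a job ring for this transform and seed the descriptor
 * header template values from the matching caam_alg_entry.
 */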
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;

	return 0;
}

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

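/*
 * Undo per-transform state: each shared descriptor and the key buffer is
 * DMA-unmapped only if its handle was set and mapped successfully, then
 * the job ring taken in caam_init_common() is released.
 */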
static void caam_exit_common(struct caam_ctx *ctx)
{
	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

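/*
 * Module unload path: unregister the new-interface AEADs that were marked
 * ->registered, then walk alg_list and unregister and free any remaining
 * legacy algorithms.
 */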
static void __exit caam_algapi_exit(void)
{

	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

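/*
 * Build a crypto_alg from one legacy template.  The template type picks
 * the sub-API (givcipher, ablkcipher or old-style aead) the algorithm is
 * exposed through, and the CAAM selector words are kept in ->caam for use
 * by caam_init_common().
 */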
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;
	t_alg->caam.alg_op = template->alg_op;

	return t_alg;
}

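/*
 * Common boilerplate for every driver_aeads[] entry.  CRYPTO_ALG_AEAD_NEW
 * flags these as implementations of the new AEAD interface (a transitional
 * marker used while old AEAD code was being converted).
 */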
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			      CRYPTO_ALG_AEAD_NEW;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

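/*
 * Module init: find the CAAM controller node ("fsl,sec-v4.0", falling back
 * to the older "fsl,sec4.0" binding), bail out unless the controller driver
 * has probed and set its drvdata, then register every legacy template and
 * every driver_aeads[] entry that allocates and registers cleanly.
 */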
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	void *priv;
	int i = 0, err = 0;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;


	INIT_LIST_HEAD(&alg_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(&driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}
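
/*
 * err ends up holding the result of the last registration attempt, so the
 * module can still load successfully even when some earlier algorithms
 * failed to register; those failures are only logged above.
 */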

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");