// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/internal/hash.h>

#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define CC_MAX_HASH_SEQ_LEN 12
#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE

struct cc_hash_handle {
	cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
	cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
	struct list_head hash_list;
};

static const u32 digest_len_init[] = {
	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
static const u32 md5_init[] = {
	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 sha1_init[] = {
	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 sha224_init[] = {
	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 sha256_init[] = {
	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 digest_len_sha512_init[] = {
	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
static u64 sha384_init[] = {
	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
static u64 sha512_init[] = {
	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };

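/*
 * Note on the larval digest tables above: each array stores the standard
 * initial hash values in reverse word order (H7..H0 for SHA-2, H4..H0 for
 * SHA-1), presumably the order in which the hash engine consumes the state
 * when it is loaded from SRAM.  md5_init reuses the SHA1_H* constants only
 * because the MD5 initialisation vector happens to be numerically identical
 * to the first four SHA-1 words (0x67452301, 0xefcdab89, 0x98badcfe,
 * 0x10325476), listed here from last word to first.
 */
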
static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static const void *cc_larval_digest(struct device *dev, u32 mode);

struct cc_hash_alg {
	struct list_head entry;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct cc_drvdata *drvdata;
	struct ahash_alg ahash_alg;
};

struct hash_key_req_ctx {
	u32 keylen;
	dma_addr_t key_dma_addr;
};

/* hash per-session context */
struct cc_hash_ctx {
	struct cc_drvdata *drvdata;
	/* holds the original digest: the digest after "setkey" if HMAC,
	 * the initial digest if HASH.
	 */
	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;

	dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
	dma_addr_t digest_buff_dma_addr;
	/* used for HMAC with a key larger than the mode's block size */
	struct hash_key_req_ctx key_params;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	unsigned int hash_len;
	struct completion setkey_comp;
	bool is_hmac;
};

static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
			unsigned int flow_mode, struct cc_hw_desc desc[],
			bool is_not_last_data, unsigned int *seq_size);

static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
{
	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
	    mode == DRV_HASH_SHA512) {
		set_bytes_swap(desc, 1);
	} else {
		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	}
}

static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
			 unsigned int digestsize)
{
	state->digest_result_dma_addr =
		dma_map_single(dev, state->digest_result_buff,
			       digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
			digestsize);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
		digestsize, state->digest_result_buff,
		&state->digest_result_dma_addr);

	return 0;
}

static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
			struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	memset(state, 0, sizeof(*state));

	if (is_hmac) {
		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
		    ctx->hw_mode != DRV_CIPHER_CMAC) {
			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);

			memcpy(state->digest_buff, ctx->digest_buff,
			       ctx->inter_digestsize);
			if (ctx->hash_mode == DRV_HASH_SHA512 ||
			    ctx->hash_mode == DRV_HASH_SHA384)
				memcpy(state->digest_bytes_len,
				       digest_len_sha512_init,
				       ctx->hash_len);
			else
				memcpy(state->digest_bytes_len, digest_len_init,
				       ctx->hash_len);
		}

		if (ctx->hash_mode != DRV_HASH_NULL) {
			dma_sync_single_for_cpu(dev,
						ctx->opad_tmp_keys_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);
			memcpy(state->opad_digest_buff,
			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
		}
	} else { /*hash*/
		/* Copy the initial digests if hash flow. */
		const void *larval = cc_larval_digest(dev, ctx->hash_mode);

		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
	}
}

static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
		      struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	state->digest_buff_dma_addr =
		dma_map_single(dev, state->digest_buff,
			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest %d B at va=%pK for DMA failed\n",
			ctx->inter_digestsize, state->digest_buff);
		return -EINVAL;
	}
	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
		ctx->inter_digestsize, state->digest_buff,
		&state->digest_buff_dma_addr);

	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len_dma_addr =
			dma_map_single(dev, state->digest_bytes_len,
				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
			goto unmap_digest_buf;
		}
		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
			&state->digest_bytes_len_dma_addr);
	}

	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
		state->opad_digest_dma_addr =
			dma_map_single(dev, state->opad_digest_buff,
				       ctx->inter_digestsize,
				       DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
				ctx->inter_digestsize,
				state->opad_digest_buff);
			goto unmap_digest_len;
		}
		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
			ctx->inter_digestsize, state->opad_digest_buff,
			&state->opad_digest_dma_addr);
	}

	return 0;

unmap_digest_len:
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		state->digest_bytes_len_dma_addr = 0;
	}
unmap_digest_buf:
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		state->digest_buff_dma_addr = 0;
	}

	return -EINVAL;
}

static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
			 struct cc_hash_ctx *ctx)
{
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&state->digest_buff_dma_addr);
		state->digest_buff_dma_addr = 0;
	}
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
			&state->digest_bytes_len_dma_addr);
		state->digest_bytes_len_dma_addr = 0;
	}
	if (state->opad_digest_dma_addr) {
		dma_unmap_single(dev, state->opad_digest_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
			&state->opad_digest_dma_addr);
		state->opad_digest_dma_addr = 0;
	}
}

static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
			    unsigned int digestsize, u8 *result)
{
	if (state->digest_result_dma_addr) {
		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
			state->digest_result_buff,
			&state->digest_result_dma_addr, digestsize);
		memcpy(result, state->digest_result_buff, digestsize);
	}
	state->digest_result_dma_addr = 0;
}

static void cc_update_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	cc_unmap_hash_request(dev, state, req->src, false);
	cc_unmap_req(dev, state, ctx);
	req->base.complete(&req->base, err);
}

static void cc_digest_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	cc_unmap_hash_request(dev, state, req->src, false);
	cc_unmap_result(dev, state, digestsize, req->result);
	cc_unmap_req(dev, state, ctx);
	req->base.complete(&req->base, err);
}

static void cc_hash_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	cc_unmap_hash_request(dev, state, req->src, false);
	cc_unmap_result(dev, state, digestsize, req->result);
	cc_unmap_req(dev, state, ctx);
	req->base.complete(&req->base, err);
}

static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
			 int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	idx++;

	return idx;
}

static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
		       int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* store the hash digest result in the context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
		      NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Loading hash opad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_sram(&desc[idx],
		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
		     ctx->hash_len);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* Perform HASH update */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     digestsize, NS_BIT);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	return idx;
}

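/*
 * The descriptor chains built by cc_fin_hmac() and cc_fin_result() realise
 * the standard HMAC construction: the inner digest computed over the
 * ipad-keyed state and the message is fed back through the engine under the
 * opad-keyed state.  A rough software reference of what the hardware is asked
 * to do (sw_hash() is a hypothetical one-shot hash helper, not part of this
 * driver):
 *
 *	inner = sw_hash(K_ipad || message);
 *	mac   = sw_hash(K_opad || inner);
 *
 * where K_ipad/K_opad are the block-sized key xored with repeated 0x36/0x5c
 * bytes, as prepared by cc_hash_setkey() below.
 */
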
static int cc_hash_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	cc_sram_addr_t larval_digest_addr =
		cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
	int idx = 0;
	int rc = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_digest_complete;
	cc_req.user_arg = req;

	/* If HMAC then load hash IPAD xor key, if HASH then load initial
	 * digest
	 */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
	} else {
		set_din_sram(&desc[idx], larval_digest_addr,
			     ctx->inter_digestsize);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);

	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI,
			     state->digest_bytes_len_dma_addr,
			     ctx->hash_len, NS_BIT);
	} else {
		set_din_const(&desc[idx], 0, ctx->hash_len);
		if (nbytes)
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		else
			set_cipher_do(&desc[idx], DO_PAD);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* HW last hash block padding (aka. "DO_PAD") */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      ctx->hash_len, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
		set_cipher_do(&desc[idx], DO_PAD);
		idx++;

		idx = cc_fin_hmac(desc, req, idx);
	}

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
			   struct ahash_req_ctx *state, unsigned int idx)
{
	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     ctx->hash_len, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	return idx;
}

static int cc_hash_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
		"hmac" : "hash", nbytes);

	if (nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
					block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size does not require HW update %x\n",
				nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		return -EINVAL;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_update_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* store current hash length in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_do_finup(struct ahash_request *req, bool update)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
		update ? "finup" : "final", nbytes);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* Pad the hash */
	hw_desc_init(&desc[idx]);
	set_cipher_do(&desc[idx], DO_PAD);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	idx++;

	if (is_hmac)
		idx = cc_fin_hmac(desc, req, idx);

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_hash_finup(struct ahash_request *req)
{
	return cc_do_finup(req, true);
}


static int cc_hash_final(struct ahash_request *req)
{
	return cc_do_finup(req, false);
}

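/*
 * Illustrative only: a minimal sketch of how a kernel caller might drive one
 * of the ahash transforms registered by this driver through the generic
 * crypto API (one-shot digest of a linear buffer).  Error handling is
 * abbreviated, the buffer must be DMA-able, and "sha256" resolves to this
 * driver only when it is the highest-priority provider; none of this code is
 * part of the driver itself.
 *
 *	#include <crypto/hash.h>
 *	#include <linux/scatterlist.h>
 *
 *	static int example_sha256(const u8 *data, unsigned int len, u8 *out)
 *	{
 *		struct crypto_ahash *tfm;
 *		struct ahash_request *req;
 *		struct scatterlist sg;
 *		DECLARE_CRYPTO_WAIT(wait);
 *		int rc;
 *
 *		tfm = crypto_alloc_ahash("sha256", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		req = ahash_request_alloc(tfm, GFP_KERNEL);
 *		if (!req) {
 *			crypto_free_ahash(tfm);
 *			return -ENOMEM;
 *		}
 *
 *		sg_init_one(&sg, data, len);
 *		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					   crypto_req_done, &wait);
 *		ahash_request_set_crypt(req, &sg, out, len);
 *
 *		rc = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *		ahash_request_free(req);
 *		crypto_free_ahash(tfm);
 *		return rc;
 *	}
 */
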
static int cc_hash_init(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	return 0;
}

static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
			  unsigned int keylen)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = NULL;
	int blocksize = 0;
	int digestsize = 0;
	int i, idx = 0, rc = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	cc_sram_addr_t larval_addr;
	struct device *dev;

	ctx = crypto_ahash_ctx(ahash);
	dev = drvdata_to_dev(ctx->drvdata);
	dev_dbg(dev, "start keylen: %d", keylen);

	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	digestsize = crypto_ahash_digestsize(ahash);

	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);

	/* The keylen value selects the flow: a keylen of ZERO bytes means
	 * plain HASH, any NON-ZERO value uses the HMAC flow.
	 */
	ctx->key_params.keylen = keylen;
	ctx->key_params.key_dma_addr = 0;
	ctx->is_hmac = true;

	if (keylen) {
		ctx->key_params.key_dma_addr =
			dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				key, keylen);
			return -ENOMEM;
		}
		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_sram(&desc[idx], larval_addr,
				     ctx->inter_digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_const(&desc[idx], 0, ctx->hash_len);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			cc_set_endianity(ctx->hash_mode, &desc[idx]);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx],
				      (ctx->opad_tmp_keys_dma_addr +
				       digestsize),
				      (blocksize - digestsize), NS_BIT, 0);
			idx++;
		} else {
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen)) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (ctx->opad_tmp_keys_dma_addr +
					       keylen), (blocksize - keylen),
					      NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, blocksize);
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
	if (rc) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		goto out;
	}

	/* calc derived HMAC key */
	for (idx = 0, i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_const(&desc[idx], 0, ctx->hash_len);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
			     blocksize, NS_BIT);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
		 * of the first HASH "update" state)
		 */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		if (i > 0) /* Not first iteration */
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		else /* First iteration */
			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

out:
	if (rc)
		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);

	if (ctx->key_params.key_dma_addr) {
		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
				 ctx->key_params.keylen, DMA_TO_DEVICE);
		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
	}
	return rc;
}
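
/*
 * For reference, the two synchronous descriptor batches above implement the
 * usual HMAC key normalisation.  A rough, software-only sketch of the same
 * steps (sw_hash() and the local buffers are hypothetical, not driver code):
 *
 *	u8 k0[blocksize];
 *
 *	memset(k0, 0, blocksize);
 *	if (keylen > blocksize)
 *		sw_hash(key, keylen, k0);	// K0 = H(K), zero padded
 *	else
 *		memcpy(k0, key, keylen);	// K0 = K, zero padded
 *
 *	for (i = 0; i < blocksize; i++) {
 *		ipad_block[i] = k0[i] ^ 0x36;
 *		opad_block[i] = k0[i] ^ 0x5c;
 *	}
 *
 * The hardware then runs one compression over each padded block and keeps the
 * intermediate states: the ipad state in digest_buff (copied into every
 * request by cc_init_req()) and the opad state in opad_tmp_keys_buff (loaded
 * again by cc_fin_hmac() for the outer hash).
 */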

static int cc_xcbc_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	int rc = 0;
	unsigned int idx = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	ctx->key_params.key_dma_addr =
		dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
			key, keylen);
		return -ENOMEM;
	}
	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	ctx->is_hmac = true;
	/* 1. Load the AES key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
		     keylen, NS_BIT);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[idx], keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

	if (rc)
		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);

	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
			 ctx->key_params.keylen, DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	return rc;
}

static int cc_cmac_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	ctx->is_hmac = true;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	/* STAT_PHASE_1: Copy key to ctx */

	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
				keylen, DMA_TO_DEVICE);

	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
	if (keylen == 24) {
		memset(ctx->opad_tmp_keys_buff + 24, 0,
		       CC_AES_KEY_SIZE_MAX - 24);
	}

	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
				   keylen, DMA_TO_DEVICE);

	ctx->key_params.keylen = keylen;

	return 0;
}

static void cc_free_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (ctx->digest_buff_dma_addr) {
		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&ctx->digest_buff_dma_addr);
		ctx->digest_buff_dma_addr = 0;
	}
	if (ctx->opad_tmp_keys_dma_addr) {
		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
				 sizeof(ctx->opad_tmp_keys_buff),
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
			&ctx->opad_tmp_keys_dma_addr);
		ctx->opad_tmp_keys_dma_addr = 0;
	}

	ctx->key_params.keylen = 0;
}

static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	ctx->key_params.keylen = 0;

	ctx->digest_buff_dma_addr =
		dma_map_single(dev, (void *)ctx->digest_buff,
			       sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->digest_buff), ctx->digest_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->digest_buff), ctx->digest_buff,
		&ctx->digest_buff_dma_addr);

	ctx->opad_tmp_keys_dma_addr =
		dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
			       sizeof(ctx->opad_tmp_keys_buff),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->opad_tmp_keys_buff),
			ctx->opad_tmp_keys_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
		&ctx->opad_tmp_keys_dma_addr);

	ctx->is_hmac = false;
	return 0;

fail:
	cc_free_ctx(ctx);
	return -ENOMEM;
}

static int cc_get_hash_len(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	return cc_get_default_hash_len(ctx->drvdata);
}

static int cc_cra_init(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct hash_alg_common *hash_alg_common =
		container_of(tfm->__crt_alg, struct hash_alg_common, base);
	struct ahash_alg *ahash_alg =
		container_of(hash_alg_common, struct ahash_alg, halg);
	struct cc_hash_alg *cc_alg =
		container_of(ahash_alg, struct cc_hash_alg, ahash_alg);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_req_ctx));

	ctx->hash_mode = cc_alg->hash_mode;
	ctx->hw_mode = cc_alg->hw_mode;
	ctx->inter_digestsize = cc_alg->inter_digestsize;
	ctx->drvdata = cc_alg->drvdata;
	ctx->hash_len = cc_get_hash_len(tfm);
	return cc_alloc_ctx(ctx);
}

static void cc_cra_exit(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "cc_cra_exit");
	cc_free_ctx(ctx);
}

static int cc_mac_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int rc;
	u32 idx = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	if (req->nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	state->xcbc_count++;

	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
					req->nbytes, block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size does not require HW update %x\n",
				req->nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_update_complete;
	cc_req.user_arg = (void *)req;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_final(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_size, key_len;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);
	u32 rem_cnt = *cc_hash_buf_cnt(state);

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_size = CC_AES_128_BIT_KEY_SIZE;
		key_len = CC_AES_128_BIT_KEY_SIZE;
	} else {
		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
			ctx->key_params.keylen;
		key_len = ctx->key_params.keylen;
	}

	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 0, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_hash_complete;
	cc_req.user_arg = (void *)req;

	if (state->xcbc_count && rem_cnt == 0) {
		/* Load key for ECB decryption */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
		set_din_type(&desc[idx], DMA_DLLI,
			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
			     key_size, NS_BIT);
		set_key_size_aes(&desc[idx], key_len);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Initiate decryption of block state to previous
		 * block_state-XOR-M[n]
		 */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     CC_AES_BLOCK_SIZE, NS_BIT);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;

		/* Memory Barrier: wait for axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	if (state->xcbc_count == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else if (rem_cnt > 0) {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_finup(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_len = 0;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
	if (state->xcbc_count > 0 && req->nbytes == 0) {
		dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
		return cc_mac_final(req);
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_hash_complete;
	cc_req.user_arg = (void *)req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 key_len;
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_digest_complete;
	cc_req.user_arg = (void *)req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_hash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	u8 *curr_buff = cc_hash_buf(state);
	u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
	const u32 tmp = CC_EXPORT_MAGIC;

	memcpy(out, &tmp, sizeof(u32));
	out += sizeof(u32);

	memcpy(out, state->digest_buff, ctx->inter_digestsize);
	out += ctx->inter_digestsize;

	memcpy(out, state->digest_bytes_len, ctx->hash_len);
	out += ctx->hash_len;

	memcpy(out, &curr_buff_cnt, sizeof(u32));
	out += sizeof(u32);

	memcpy(out, curr_buff, curr_buff_cnt);

	return 0;
}

static int cc_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	u32 tmp;

	memcpy(&tmp, in, sizeof(u32));
	if (tmp != CC_EXPORT_MAGIC)
		return -EINVAL;
	in += sizeof(u32);

	cc_init_req(dev, state, ctx);

	memcpy(state->digest_buff, in, ctx->inter_digestsize);
	in += ctx->inter_digestsize;

	memcpy(state->digest_bytes_len, in, ctx->hash_len);
	in += ctx->hash_len;

	/* Sanity check the data as much as possible */
	memcpy(&tmp, in, sizeof(u32));
	if (tmp > CC_MAX_HASH_BLCK_SIZE)
		return -EINVAL;
	in += sizeof(u32);

	state->buf_cnt[0] = tmp;
	memcpy(state->buffers[0], in, tmp);

	return 0;
}

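/*
 * The serialised state produced by cc_hash_export() and consumed by
 * cc_hash_import() is laid out as sketched below.  The struct is only an
 * illustration of the byte layout (the driver writes the fields sequentially
 * and sizes the buffer with the CC_STATE_SIZE() macro), not a type used by
 * the code:
 *
 *	struct cc_exported_state {
 *		u32 magic;			// CC_EXPORT_MAGIC
 *		u8  digest[inter_digestsize];	// intermediate digest
 *		u8  digest_bytes_len[hash_len];	// processed-length counter
 *		u32 buf_cnt;			// bytes pending in buffer
 *		u8  buffer[buf_cnt];		// unprocessed partial block
 *	};
 */
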
1520struct cc_hash_template {
1521 char name[CRYPTO_MAX_ALG_NAME];
1522 char driver_name[CRYPTO_MAX_ALG_NAME];
1523 char mac_name[CRYPTO_MAX_ALG_NAME];
1524 char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1525 unsigned int blocksize;
1526 bool synchronize;
1527 struct ahash_alg template_ahash;
1528 int hash_mode;
1529 int hw_mode;
1530 int inter_digestsize;
1531 struct cc_drvdata *drvdata;
Gilad Ben-Yossef27b3b222018-02-19 14:51:23 +00001532 u32 min_hw_rev;
Gilad Ben-Yossef63893812018-01-22 09:27:02 +00001533};
1534
1535#define CC_STATE_SIZE(_x) \
Gilad Ben-Yossef27b3b222018-02-19 14:51:23 +00001536 ((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
Gilad Ben-Yossef63893812018-01-22 09:27:02 +00001537
1538/* hash descriptors */
1539static struct cc_hash_template driver_hash[] = {
1540 //Asynchronize hash template
	{
		.name = "sha1",
		.driver_name = "sha1-ccree",
		.mac_name = "hmac(sha1)",
		.mac_driver_name = "hmac-sha1-ccree",
		.blocksize = SHA1_BLOCK_SIZE,
		.synchronize = false,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA1,
		.hw_mode = DRV_HASH_HW_SHA1,
		.inter_digestsize = SHA1_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
	},
	{
		.name = "sha256",
		.driver_name = "sha256-ccree",
		.mac_name = "hmac(sha256)",
		.mac_driver_name = "hmac-sha256-ccree",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA256,
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
	},
	{
		.name = "sha224",
		.driver_name = "sha224-ccree",
		.mac_name = "hmac(sha224)",
		.mac_driver_name = "hmac-sha224-ccree",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA224,
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
	},
	{
		.name = "sha384",
		.driver_name = "sha384-ccree",
		.mac_name = "hmac(sha384)",
		.mac_driver_name = "hmac-sha384-ccree",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA384,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_712,
	},
	{
		.name = "sha512",
		.driver_name = "sha512-ccree",
		.mac_name = "hmac(sha512)",
		.mac_driver_name = "hmac-sha512-ccree",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA512,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_712,
	},
	{
		.name = "md5",
		.driver_name = "md5-ccree",
		.mac_name = "hmac(md5)",
		.mac_driver_name = "hmac-md5-ccree",
		.blocksize = MD5_HMAC_BLOCK_SIZE,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_MD5,
		.hw_mode = DRV_HASH_HW_MD5,
		.inter_digestsize = MD5_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
	},
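	/*
	 * MAC-only templates: hash_mode is DRV_HASH_NULL, so only the
	 * keyed (mac_name) variant is registered for these entries.
	 */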
	{
		.mac_name = "xcbc(aes)",
		.mac_driver_name = "xcbc-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_mac_update,
			.final = cc_mac_final,
			.finup = cc_mac_finup,
			.digest = cc_mac_digest,
			.setkey = cc_xcbc_setkey,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_XCBC_MAC,
		.inter_digestsize = AES_BLOCK_SIZE,
		.min_hw_rev = CC_HW_REV_630,
	},
	{
		.mac_name = "cmac(aes)",
		.mac_driver_name = "cmac-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_mac_update,
			.final = cc_mac_final,
			.finup = cc_mac_finup,
			.digest = cc_mac_digest,
			.setkey = cc_cmac_setkey,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_CMAC,
		.inter_digestsize = AES_BLOCK_SIZE,
		.min_hw_rev = CC_HW_REV_630,
	},
};

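/*
 * Allocate and fill a cc_hash_alg from a driver_hash[] template. When
 * @keyed is true the hmac/mac names are used and ->setkey is kept;
 * otherwise the plain hash names are used and ->setkey is cleared.
 */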
static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
					     struct device *dev, bool keyed)
{
	struct cc_hash_alg *t_crypto_alg;
	struct crypto_alg *alg;
	struct ahash_alg *halg;

	t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
	if (!t_crypto_alg)
		return ERR_PTR(-ENOMEM);

	t_crypto_alg->ahash_alg = template->template_ahash;
	halg = &t_crypto_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_driver_name);
	} else {
		halg->setkey = NULL;
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
	alg->cra_priority = CC_CRA_PRIO;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_exit = cc_cra_exit;
	alg->cra_init = cc_cra_init;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_crypto_alg->hash_mode = template->hash_mode;
	t_crypto_alg->hw_mode = template->hw_mode;
	t_crypto_alg->inter_digestsize = template->inter_digestsize;

	return t_crypto_alg;
}

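/*
 * Program the constant values the hash flows need into SRAM. The layout,
 * starting at digest_len_sram_addr, is:
 *
 *   digest_len_init
 *   [digest_len_sha512_init]   (HW rev >= 712 only)
 *   md5_init, sha1_init, sha224_init, sha256_init
 *   [sha384_init, sha512_init] (HW rev >= 712 only)
 *
 * cc_larval_digest_addr() and cc_digest_len_addr() below recompute offsets
 * into this layout by summing the same sizeof()s, so the orderings must
 * stay in sync. sha384_init/sha512_init are u64 arrays, hence the
 * ARRAY_SIZE() * 2 when counting their u32 words.
 */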
int cc_init_hash_sram(struct cc_drvdata *drvdata)
{
	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
	cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
	unsigned int larval_seq_len = 0;
	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
	bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
	int rc = 0;

	/* Copy-to-sram digest-len */
	cc_set_sram_desc(digest_len_init, sram_buff_ofs,
			 ARRAY_SIZE(digest_len_init), larval_seq,
			 &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (rc)
		goto init_digest_const_err;

	sram_buff_ofs += sizeof(digest_len_init);
	larval_seq_len = 0;

	if (large_sha_supported) {
		/* Copy-to-sram digest-len for sha384/512 */
		cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
				 ARRAY_SIZE(digest_len_sha512_init),
				 larval_seq, &larval_seq_len);
		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
		if (rc)
			goto init_digest_const_err;

		sram_buff_ofs += sizeof(digest_len_sha512_init);
		larval_seq_len = 0;
	}

	/* The initial digests offset */
	hash_handle->larval_digest_sram_addr = sram_buff_ofs;

	/* Copy-to-sram initial MD5/SHA* digests */
	cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
			 larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (rc)
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(md5_init);
	larval_seq_len = 0;

	cc_set_sram_desc(sha1_init, sram_buff_ofs,
			 ARRAY_SIZE(sha1_init), larval_seq,
			 &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (rc)
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha1_init);
	larval_seq_len = 0;

	cc_set_sram_desc(sha224_init, sram_buff_ofs,
			 ARRAY_SIZE(sha224_init), larval_seq,
			 &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (rc)
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha224_init);
	larval_seq_len = 0;

	cc_set_sram_desc(sha256_init, sram_buff_ofs,
			 ARRAY_SIZE(sha256_init), larval_seq,
			 &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (rc)
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha256_init);
	larval_seq_len = 0;

	if (large_sha_supported) {
		cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
				 (ARRAY_SIZE(sha384_init) * 2), larval_seq,
				 &larval_seq_len);
		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
		if (rc)
			goto init_digest_const_err;
		sram_buff_ofs += sizeof(sha384_init);
		larval_seq_len = 0;

		cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
				 (ARRAY_SIZE(sha512_init) * 2), larval_seq,
				 &larval_seq_len);
		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
		if (rc)
			goto init_digest_const_err;
	}

init_digest_const_err:
	return rc;
}

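/*
 * Swap each pair of adjacent 32-bit words in place. @size is in u32 units
 * and is assumed to be even (it always is for the u64 larval digest arrays
 * this is applied to).
 */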
static void __init cc_swap_dwords(u32 *buf, unsigned long size)
{
	int i;
	u32 tmp;

	for (i = 0; i < size; i += 2) {
		tmp = buf[i];
		buf[i] = buf[i + 1];
		buf[i + 1] = tmp;
	}
}

/*
 * Due to the way the HW works we need to swap every
 * double word in the SHA384 and SHA512 larval hashes
 */
void __init cc_hash_global_init(void)
{
	cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
	cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
}

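/*
 * Allocate the hash handle, carve out SRAM for the digest-length constants
 * and larval digests, program them via cc_init_hash_sram() and register an
 * hmac/mac variant (and, where applicable, a plain hash variant) for every
 * template supported by the HW revision.
 */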
int cc_hash_alloc(struct cc_drvdata *drvdata)
{
	struct cc_hash_handle *hash_handle;
	cc_sram_addr_t sram_buff;
	u32 sram_size_to_alloc;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;
	int alg;

	hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
	if (!hash_handle)
		return -ENOMEM;

	INIT_LIST_HEAD(&hash_handle->hash_list);
	drvdata->hash_handle = hash_handle;

	sram_size_to_alloc = sizeof(digest_len_init) +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init);

	if (drvdata->hw_rev >= CC_HW_REV_712)
		sram_size_to_alloc += sizeof(digest_len_sha512_init) +
			sizeof(sha384_init) + sizeof(sha512_init);

	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
	if (sram_buff == NULL_SRAM_ADDR) {
		dev_err(dev, "SRAM pool exhausted\n");
		rc = -ENOMEM;
		goto fail;
	}

	/* The initial digest-len offset */
	hash_handle->digest_len_sram_addr = sram_buff;

	/* Must be set before the alg registration, as it is used there */
	rc = cc_init_hash_sram(drvdata);
	if (rc) {
		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
		goto fail;
	}

	/* ahash registration */
	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
		struct cc_hash_alg *t_alg;
		int hw_mode = driver_hash[alg].hw_mode;

		/* We either support both HASH and MAC or none */
		if (driver_hash[alg].min_hw_rev > drvdata->hw_rev)
			continue;

		/* register hmac version */
		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				driver_hash[alg].driver_name);
			goto fail;
		}
		t_alg->drvdata = drvdata;

		rc = crypto_register_ahash(&t_alg->ahash_alg);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
				driver_hash[alg].driver_name);
			kfree(t_alg);
			goto fail;
		} else {
			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
		}

		if (hw_mode == DRV_CIPHER_XCBC_MAC ||
		    hw_mode == DRV_CIPHER_CMAC)
			continue;

		/* register hash version */
		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				driver_hash[alg].driver_name);
			goto fail;
		}
		t_alg->drvdata = drvdata;

		rc = crypto_register_ahash(&t_alg->ahash_alg);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
				driver_hash[alg].driver_name);
			kfree(t_alg);
			goto fail;
		} else {
			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
		}
	}

	return 0;

fail:
	kfree(drvdata->hash_handle);
	drvdata->hash_handle = NULL;
	return rc;
}

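/*
 * Unregister every ahash algorithm that cc_hash_alloc() registered and
 * release the hash handle.
 */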
int cc_hash_free(struct cc_drvdata *drvdata)
{
	struct cc_hash_alg *t_hash_alg, *hash_n;
	struct cc_hash_handle *hash_handle = drvdata->hash_handle;

	if (hash_handle) {
		list_for_each_entry_safe(t_hash_alg, hash_n,
					 &hash_handle->hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}

		kfree(hash_handle);
		drvdata->hash_handle = NULL;
	}
	return 0;
}

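/*
 * Build the setup descriptors for an AES-XCBC-MAC operation: load K1 as the
 * AES key and K2/K3 into the state registers, then load the running MAC
 * state. The three derived keys are assumed to have been written to the
 * opad_tmp_keys buffer at their XCBC_MAC_K*_OFFSETs by cc_xcbc_setkey().
 */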
static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Setup XCBC MAC K1 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
					    XCBC_MAC_K1_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K2 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K3 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Loading MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;
	*seq_size = idx;
}

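/*
 * Build the setup descriptors for an AES-CMAC operation: load the user key
 * and then the running MAC state. For 192-bit keys the full AES_MAX_KEY_SIZE
 * buffer is DMA'd, presumably because the key is stored zero-padded in the
 * opad_tmp_keys buffer.
 */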
static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Setup CMAC Key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
		      ctx->key_params.keylen), NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Load MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;
	*seq_size = idx;
}

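/*
 * Append the data-processing descriptors for the current request. A DLLI
 * buffer (single contiguous DMA region) is fed to the flow directly; an
 * MLLI scatter list is first copied into SRAM via a BYPASS descriptor and
 * then processed from there. A NULL buffer type means there is no data to
 * hash, so nothing is added.
 */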
static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
			struct cc_hash_ctx *ctx, unsigned int flow_mode,
			struct cc_hw_desc desc[], bool is_not_last_data,
			unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     sg_dma_address(areq_ctx->curr_sg),
			     areq_ctx->curr_sg->length, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		idx++;
	} else {
		if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			dev_dbg(dev, " NULL mode\n");
			/* nothing to build */
			return;
		}
		/* bypass */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     areq_ctx->mlli_params.mlli_dma_addr,
			     areq_ctx->mlli_params.mlli_len, NS_BIT);
		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
			      areq_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[idx], BYPASS);
		idx++;
		/* process */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI,
			     ctx->drvdata->mlli_sram_addr,
			     areq_ctx->mlli_nents, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		idx++;
	}
	if (is_not_last_data)
		set_din_not_last_indication(&desc[(idx - 1)]);
	/* return updated desc sequence size */
	*seq_size = idx;
}

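/*
 * Return the host copy of the larval (initial) digest for @mode. On an
 * invalid mode the md5 larval digest is returned as a harmless fallback
 * rather than NULL.
 */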
static const void *cc_larval_digest(struct device *dev, u32 mode)
{
	switch (mode) {
	case DRV_HASH_MD5:
		return md5_init;
	case DRV_HASH_SHA1:
		return sha1_init;
	case DRV_HASH_SHA224:
		return sha224_init;
	case DRV_HASH_SHA256:
		return sha256_init;
	case DRV_HASH_SHA384:
		return sha384_init;
	case DRV_HASH_SHA512:
		return sha512_init;
	default:
		dev_err(dev, "Invalid hash mode (%d)\n", mode);
		return md5_init;
	}
}

/*!
 * Gets the address of the initial (larval) digest in SRAM
 * according to the given hash mode
 *
 * \param drvdata
 * \param mode The hash mode. Supported modes:
 *             MD5/SHA1/SHA224/SHA256/SHA384/SHA512
 *
 * \return cc_sram_addr_t The address of the initial digest in SRAM
 */
cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
{
	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
	struct device *dev = drvdata_to_dev(_drvdata);

	switch (mode) {
	case DRV_HASH_NULL:
		break; /* Ignore */
	case DRV_HASH_MD5:
		return (hash_handle->larval_digest_sram_addr);
	case DRV_HASH_SHA1:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init));
	case DRV_HASH_SHA224:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init));
	case DRV_HASH_SHA256:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init));
	case DRV_HASH_SHA384:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init));
	case DRV_HASH_SHA512:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init) +
			sizeof(sha384_init));
	default:
		dev_err(dev, "Invalid hash mode (%d)\n", mode);
	}

	/* A valid but wrong value, returned only to avoid a kernel crash */
	return hash_handle->larval_digest_sram_addr;
}

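/*
 * Return the SRAM address of the initial digest-length value for @mode.
 * SHA384/SHA512 use the second length constant, stored right after
 * digest_len_init by cc_init_hash_sram().
 */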
cc_sram_addr_t cc_digest_len_addr(void *drvdata, u32 mode)
{
	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
	cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;

	switch (mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA224:
	case DRV_HASH_SHA256:
	case DRV_HASH_MD5:
		return digest_len_addr;
#if (CC_DEV_SHA_MAX > 256)
	case DRV_HASH_SHA384:
	case DRV_HASH_SHA512:
		return digest_len_addr + sizeof(digest_len_init);
#endif
	default:
		return digest_len_addr; /* to avoid kernel crash */
	}
}