// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sm3.h>
#include <crypto/internal/hash.h>

#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define CC_MAX_HASH_SEQ_LEN 12
#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
#define CC_SM3_HASH_LEN_SIZE 8

struct cc_hash_handle {
	u32 digest_len_sram_addr; /* const value in SRAM */
	u32 larval_digest_sram_addr; /* const value in SRAM */
	struct list_head hash_list;
};

static const u32 cc_digest_len_init[] = {
	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
static const u32 cc_md5_init[] = {
	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha1_init[] = {
	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha224_init[] = {
	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 cc_sha256_init[] = {
	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 cc_digest_len_sha512_init[] = {
	0x00000080, 0x00000000, 0x00000000, 0x00000000 };

/*
 * Due to the way the HW works, every double word in the SHA384 and SHA512
 * larval hashes must be stored in hi/lo order
 */
#define hilo(x) upper_32_bits(x), lower_32_bits(x)
static const u32 cc_sha384_init[] = {
	hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
	hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
static const u32 cc_sha512_init[] = {
	hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
	hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };

static const u32 cc_sm3_init[] = {
	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };

static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static const void *cc_larval_digest(struct device *dev, u32 mode);

struct cc_hash_alg {
	struct list_head entry;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct cc_drvdata *drvdata;
	struct ahash_alg ahash_alg;
};

struct hash_key_req_ctx {
	u32 keylen;
	dma_addr_t key_dma_addr;
	u8 *key;
};

/* hash per-session context */
struct cc_hash_ctx {
	struct cc_drvdata *drvdata;
	/* holds the original digest; the digest after "setkey" if HMAC,
	 * the initial digest if HASH.
	 */
	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;

	dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
	dma_addr_t digest_buff_dma_addr;
	/* used for HMAC with a key larger than the mode's block size */
	struct hash_key_req_ctx key_params;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	unsigned int hash_len;
	struct completion setkey_comp;
	bool is_hmac;
};

static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
			unsigned int flow_mode, struct cc_hw_desc desc[],
			bool is_not_last_data, unsigned int *seq_size);

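/*
 * Configure how the engine writes the digest out: MD5, SHA384 and SHA512
 * results are byte-swapped on output, all other modes are written out
 * little-endian as-is.
 */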
static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
{
	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
	    mode == DRV_HASH_SHA512) {
		set_bytes_swap(desc, 1);
	} else {
		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	}
}

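/* DMA-map the buffer that receives the final digest from the engine. */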
static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
			 unsigned int digestsize)
{
	state->digest_result_dma_addr =
		dma_map_single(dev, state->digest_result_buff,
			       digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
			digestsize);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
		digestsize, state->digest_result_buff,
		&state->digest_result_dma_addr);

	return 0;
}

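/*
 * Reset the request state for a fresh transform. For HMAC, seed it with
 * the precomputed IPAD digest (and OPAD digest) from the session context;
 * for a plain hash, seed it with the algorithm's larval digest.
 */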
static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
			struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	memset(state, 0, sizeof(*state));

	if (is_hmac) {
		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
		    ctx->hw_mode != DRV_CIPHER_CMAC) {
			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);

			memcpy(state->digest_buff, ctx->digest_buff,
			       ctx->inter_digestsize);
			if (ctx->hash_mode == DRV_HASH_SHA512 ||
			    ctx->hash_mode == DRV_HASH_SHA384)
				memcpy(state->digest_bytes_len,
				       cc_digest_len_sha512_init,
				       ctx->hash_len);
			else
				memcpy(state->digest_bytes_len,
				       cc_digest_len_init,
				       ctx->hash_len);
		}

		if (ctx->hash_mode != DRV_HASH_NULL) {
			dma_sync_single_for_cpu(dev,
						ctx->opad_tmp_keys_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);
			memcpy(state->opad_digest_buff,
			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
		}
	} else { /*hash*/
		/* Copy the initial digests if hash flow. */
		const void *larval = cc_larval_digest(dev, ctx->hash_mode);

		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
	}
}

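/*
 * DMA-map the per-request intermediate digest, the running byte count
 * (except for XCBC-MAC, which keeps no length) and, for HMAC, the OPAD
 * digest. Any partial mappings are undone on failure.
 */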
static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
		      struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	state->digest_buff_dma_addr =
		dma_map_single(dev, state->digest_buff,
			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
			ctx->inter_digestsize, state->digest_buff);
		return -EINVAL;
	}
	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
		ctx->inter_digestsize, state->digest_buff,
		&state->digest_buff_dma_addr);

	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len_dma_addr =
			dma_map_single(dev, state->digest_bytes_len,
				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
			goto unmap_digest_buf;
		}
		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
			&state->digest_bytes_len_dma_addr);
	}

	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
		state->opad_digest_dma_addr =
			dma_map_single(dev, state->opad_digest_buff,
				       ctx->inter_digestsize,
				       DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
				ctx->inter_digestsize,
				state->opad_digest_buff);
			goto unmap_digest_len;
		}
		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
			ctx->inter_digestsize, state->opad_digest_buff,
			&state->opad_digest_dma_addr);
	}

	return 0;

unmap_digest_len:
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		state->digest_bytes_len_dma_addr = 0;
	}
unmap_digest_buf:
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		state->digest_buff_dma_addr = 0;
	}

	return -EINVAL;
}

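/* Release the per-request DMA mappings set up by cc_map_req(). */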
static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
			 struct cc_hash_ctx *ctx)
{
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&state->digest_buff_dma_addr);
		state->digest_buff_dma_addr = 0;
	}
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
			&state->digest_bytes_len_dma_addr);
		state->digest_bytes_len_dma_addr = 0;
	}
	if (state->opad_digest_dma_addr) {
		dma_unmap_single(dev, state->opad_digest_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
			&state->opad_digest_dma_addr);
		state->opad_digest_dma_addr = 0;
	}
}

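/* Unmap the result buffer and copy the final digest back to the caller. */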
static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
			    unsigned int digestsize, u8 *result)
{
	if (state->digest_result_dma_addr) {
		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
			state->digest_result_buff,
			&state->digest_result_dma_addr, digestsize);
		memcpy(result, state->digest_result_buff, digestsize);
	}
	state->digest_result_dma_addr = 0;
}

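/*
 * Completion callbacks, invoked by the request manager when the HW is done
 * with a descriptor sequence. Anything other than an -EINPROGRESS backlog
 * notification tears down the DMA mappings before completing the request.
 */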
static void cc_update_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_digest_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_hash_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

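/* Append the descriptor that writes the final digest out to memory. */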
static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
			 int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	idx++;

	return idx;
}

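/*
 * Append the HMAC outer-hash descriptors: store the inner digest, load the
 * OPAD-xor-key state and the digest length constant from SRAM, then hash
 * the inner digest through the outer transform.
 */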
static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
		       int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* store the hash digest result in the context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
		      NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Loading hash opad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_sram(&desc[idx],
		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
		     ctx->hash_len);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* Perform HASH update */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     digestsize, NS_BIT);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	return idx;
}

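/*
 * One-shot .digest entry point: build the whole descriptor chain (state
 * load, data, HMAC finish when keyed, result write-out) and queue it.
 */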
static int cc_hash_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 larval_digest_addr;
	int idx = 0;
	int rc = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_digest_complete;
	cc_req.user_arg = req;

	/* If HMAC then load hash IPAD xor key, if HASH then load initial
	 * digest
	 */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
	} else {
		larval_digest_addr = cc_larval_digest_addr(ctx->drvdata,
							   ctx->hash_mode);
		set_din_sram(&desc[idx], larval_digest_addr,
			     ctx->inter_digestsize);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);

	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI,
			     state->digest_bytes_len_dma_addr,
			     ctx->hash_len, NS_BIT);
	} else {
		set_din_const(&desc[idx], 0, ctx->hash_len);
		if (nbytes)
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		else
			set_cipher_do(&desc[idx], DO_PAD);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* HW last hash block padding (aka. "DO_PAD") */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      ctx->hash_len, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
		set_cipher_do(&desc[idx], DO_PAD);
		idx++;

		idx = cc_fin_hmac(desc, req, idx);
	}

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

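/*
 * Reload the saved intermediate digest and byte count into the engine and
 * stream in the buffered data; shared by the update/final/finup flows.
 */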
static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
			   struct ahash_req_ctx *state, unsigned int idx)
{
	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     ctx->hash_len, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	return idx;
}

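/*
 * .update entry point: hash the full blocks accumulated so far and write
 * the new intermediate digest and running length back to the request state.
 */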
static int cc_hash_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
		"hmac" : "hash", nbytes);

	if (nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
					block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size does not require HW update %x\n",
				nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		return -EINVAL;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_update_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* store current hash length in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

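/*
 * Common body of .finup and .final ("update" selects the finup variant):
 * restore the saved state, pad and hash any trailing data, run the HMAC
 * outer hash when keyed, and emit the final digest.
 */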
static int cc_do_finup(struct ahash_request *req, bool update)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
		update ? "finup" : "final", nbytes);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* Pad the hash */
	hw_desc_init(&desc[idx]);
	set_cipher_do(&desc[idx], DO_PAD);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	idx++;

	if (is_hmac)
		idx = cc_fin_hmac(desc, req, idx);

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_hash_finup(struct ahash_request *req)
{
	return cc_do_finup(req, true);
}


static int cc_hash_final(struct ahash_request *req)
{
	return cc_do_finup(req, false);
}

static int cc_hash_init(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	return 0;
}

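/*
 * HMAC .setkey: keys longer than the block size are first hashed down to
 * digest size; the key is then zero-padded to a full block, and the IPAD
 * and OPAD intermediate digests are precomputed on the engine and cached
 * in the session context.
 */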
static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
			  unsigned int keylen)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = NULL;
	int blocksize = 0;
	int digestsize = 0;
	int i, idx = 0, rc = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 larval_addr;
	struct device *dev;

	ctx = crypto_ahash_ctx(ahash);
	dev = drvdata_to_dev(ctx->drvdata);
	dev_dbg(dev, "start keylen: %d", keylen);

	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	digestsize = crypto_ahash_digestsize(ahash);

	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);

	/* The keylen value distinguishes the flows: a ZERO keylen selects
	 * plain HASH, any NON-ZERO value selects the HMAC flow
	 */
	ctx->key_params.keylen = keylen;
	ctx->key_params.key_dma_addr = 0;
	ctx->is_hmac = true;
	ctx->key_params.key = NULL;

	if (keylen) {
		ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
		if (!ctx->key_params.key)
			return -ENOMEM;

		ctx->key_params.key_dma_addr =
			dma_map_single(dev, ctx->key_params.key, keylen,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				ctx->key_params.key, keylen);
			kzfree(ctx->key_params.key);
			return -ENOMEM;
		}
		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_sram(&desc[idx], larval_addr,
				     ctx->inter_digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_const(&desc[idx], 0, ctx->hash_len);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			cc_set_endianity(ctx->hash_mode, &desc[idx]);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx],
				      (ctx->opad_tmp_keys_dma_addr +
				       digestsize),
				      (blocksize - digestsize), NS_BIT, 0);
			idx++;
		} else {
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen)) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (ctx->opad_tmp_keys_dma_addr +
					       keylen), (blocksize - keylen),
					      NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, blocksize);
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
	if (rc) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		goto out;
	}

	/* calc derived HMAC key */
	for (idx = 0, i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_const(&desc[idx], 0, ctx->hash_len);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
			     blocksize, NS_BIT);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
		 * of the first HASH "update" state)
		 */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		if (i > 0) /* Not first iteration */
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		else /* First iteration */
			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

out:
	if (ctx->key_params.key_dma_addr) {
		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
				 ctx->key_params.keylen, DMA_TO_DEVICE);
		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
	}

	kzfree(ctx->key_params.key);

	return rc;
}

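/*
 * AES-XCBC-MAC .setkey: encrypt the 0x01/0x02/0x03 constant blocks with
 * the user key in ECB mode to derive K1, K2 and K3 into the opad_tmp_keys
 * buffer.
 */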
static int cc_xcbc_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	int rc = 0;
	unsigned int idx = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key_params.key)
		return -ENOMEM;

	ctx->key_params.key_dma_addr =
		dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
			key, keylen);
		kzfree(ctx->key_params.key);
		return -ENOMEM;
	}
	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	ctx->is_hmac = true;
	/* 1. Load the AES key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
		     keylen, NS_BIT);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[idx], keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
			 ctx->key_params.keylen, DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	kzfree(ctx->key_params.key);

	return rc;
}

static int cc_cmac_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	ctx->is_hmac = true;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	/* STAT_PHASE_1: Copy key to ctx */

	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
				keylen, DMA_TO_DEVICE);

	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
	if (keylen == 24) {
		memset(ctx->opad_tmp_keys_buff + 24, 0,
		       CC_AES_KEY_SIZE_MAX - 24);
	}

	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
				   keylen, DMA_TO_DEVICE);

	ctx->key_params.keylen = keylen;

	return 0;
}

static void cc_free_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (ctx->digest_buff_dma_addr) {
		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&ctx->digest_buff_dma_addr);
		ctx->digest_buff_dma_addr = 0;
	}
	if (ctx->opad_tmp_keys_dma_addr) {
		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
				 sizeof(ctx->opad_tmp_keys_buff),
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
			&ctx->opad_tmp_keys_dma_addr);
		ctx->opad_tmp_keys_dma_addr = 0;
	}

	ctx->key_params.keylen = 0;
}

static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	ctx->key_params.keylen = 0;

	ctx->digest_buff_dma_addr =
		dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->digest_buff), ctx->digest_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->digest_buff), ctx->digest_buff,
		&ctx->digest_buff_dma_addr);

	ctx->opad_tmp_keys_dma_addr =
		dma_map_single(dev, ctx->opad_tmp_keys_buff,
			       sizeof(ctx->opad_tmp_keys_buff),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->opad_tmp_keys_buff),
			ctx->opad_tmp_keys_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
		&ctx->opad_tmp_keys_dma_addr);

	ctx->is_hmac = false;
	return 0;

fail:
	cc_free_ctx(ctx);
	return -ENOMEM;
}

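/* SM3 keeps an 8 byte length field; all other modes use the HW default. */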
static int cc_get_hash_len(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->hash_mode == DRV_HASH_SM3)
		return CC_SM3_HASH_LEN_SIZE;
	else
		return cc_get_default_hash_len(ctx->drvdata);
}

static int cc_cra_init(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct hash_alg_common *hash_alg_common =
		container_of(tfm->__crt_alg, struct hash_alg_common, base);
	struct ahash_alg *ahash_alg =
		container_of(hash_alg_common, struct ahash_alg, halg);
	struct cc_hash_alg *cc_alg =
		container_of(ahash_alg, struct cc_hash_alg, ahash_alg);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_req_ctx));

	ctx->hash_mode = cc_alg->hash_mode;
	ctx->hw_mode = cc_alg->hw_mode;
	ctx->inter_digestsize = cc_alg->inter_digestsize;
	ctx->drvdata = cc_alg->drvdata;
	ctx->hash_len = cc_get_hash_len(tfm);
	return cc_alloc_ctx(ctx);
}

static void cc_cra_exit(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "cc_cra_exit");
	cc_free_ctx(ctx);
}

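/*
 * XCBC/CMAC .update: feed the accumulated full blocks through the AES MAC
 * flow and write the intermediate MAC state back to the request context.
 */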
static int cc_mac_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int rc;
	u32 idx = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	if (req->nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	state->xcbc_count++;

	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
					req->nbytes, block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size does not require HW update %x\n",
				req->nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Setup request structure */
	cc_req.user_cb = cc_update_complete;
	cc_req.user_arg = req;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

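/*
 * XCBC/CMAC .final: when data was consumed earlier but nothing is left
 * buffered, the stored MAC state is first ECB-decrypted back to
 * block_state-XOR-M[n] so the HW can redo the last block as a final one.
 */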
static int cc_mac_final(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_size, key_len;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);
	u32 rem_cnt = *cc_hash_buf_cnt(state);

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_size = CC_AES_128_BIT_KEY_SIZE;
		key_len = CC_AES_128_BIT_KEY_SIZE;
	} else {
		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
			ctx->key_params.keylen;
		key_len = ctx->key_params.keylen;
	}

	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 0, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	if (state->xcbc_count && rem_cnt == 0) {
		/* Load key for ECB decryption */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
		set_din_type(&desc[idx], DMA_DLLI,
			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
			     key_size, NS_BIT);
		set_key_size_aes(&desc[idx], key_len);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Initiate decryption of block state to previous
		 * block_state-XOR-M[n]
		 */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     CC_AES_BLOCK_SIZE, NS_BIT);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;

		/* Memory Barrier: wait for axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	if (state->xcbc_count == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else if (rem_cnt > 0) {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_finup(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_len = 0;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
	if (state->xcbc_count > 0 && req->nbytes == 0) {
		dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
		return cc_mac_final(req);
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 key_len;
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_digest_complete;
	cc_req.user_arg = req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

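/*
 * Serialize the partial-hash state (magic, intermediate digest, running
 * length, buffered tail) so a transform can be suspended and resumed.
 */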
static int cc_hash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	u8 *curr_buff = cc_hash_buf(state);
	u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
	const u32 tmp = CC_EXPORT_MAGIC;

	memcpy(out, &tmp, sizeof(u32));
	out += sizeof(u32);

	memcpy(out, state->digest_buff, ctx->inter_digestsize);
	out += ctx->inter_digestsize;

	memcpy(out, state->digest_bytes_len, ctx->hash_len);
	out += ctx->hash_len;

	memcpy(out, &curr_buff_cnt, sizeof(u32));
	out += sizeof(u32);

	memcpy(out, curr_buff, curr_buff_cnt);

	return 0;
}

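/*
 * Rebuild a request state from a blob produced by cc_hash_export(),
 * validating the magic value and the buffered byte count on the way in.
 */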
static int cc_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	u32 tmp;

	memcpy(&tmp, in, sizeof(u32));
	if (tmp != CC_EXPORT_MAGIC)
		return -EINVAL;
	in += sizeof(u32);

	cc_init_req(dev, state, ctx);

	memcpy(state->digest_buff, in, ctx->inter_digestsize);
	in += ctx->inter_digestsize;

	memcpy(state->digest_bytes_len, in, ctx->hash_len);
	in += ctx->hash_len;

	/* Sanity check the data as much as possible */
	memcpy(&tmp, in, sizeof(u32));
	if (tmp > CC_MAX_HASH_BLCK_SIZE)
		return -EINVAL;
	in += sizeof(u32);

	state->buf_cnt[0] = tmp;
	memcpy(state->buffers[0], in, tmp);

	return 0;
}

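/*
 * One registration template per supported hash/MAC: algorithm names, block
 * size, HW hash mode, and the minimum HW revision / standards body that
 * must be present for the algorithm to be registered.
 */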
struct cc_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char mac_name[CRYPTO_MAX_ALG_NAME];
	char mac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	bool is_mac;
	bool synchronize;
	struct ahash_alg template_ahash;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct cc_drvdata *drvdata;
	u32 min_hw_rev;
	enum cc_std_body std_body;
};

#define CC_STATE_SIZE(_x) \
	((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))

/* hash descriptors */
static struct cc_hash_template driver_hash[] = {
	// Asynchronous hash templates
	{
		.name = "sha1",
		.driver_name = "sha1-ccree",
		.mac_name = "hmac(sha1)",
		.mac_driver_name = "hmac-sha1-ccree",
		.blocksize = SHA1_BLOCK_SIZE,
		.is_mac = true,
		.synchronize = false,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA1,
		.hw_mode = DRV_HASH_HW_SHA1,
		.inter_digestsize = SHA1_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha256",
		.driver_name = "sha256-ccree",
		.mac_name = "hmac(sha256)",
		.mac_driver_name = "hmac-sha256-ccree",
		.blocksize = SHA256_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA256,
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha224",
		.driver_name = "sha224-ccree",
		.mac_name = "hmac(sha224)",
		.mac_driver_name = "hmac-sha224-ccree",
		.blocksize = SHA224_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA224,
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha384",
		.driver_name = "sha384-ccree",
		.mac_name = "hmac(sha384)",
		.mac_driver_name = "hmac-sha384-ccree",
		.blocksize = SHA384_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA384,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha512",
		.driver_name = "sha512-ccree",
		.mac_name = "hmac(sha512)",
		.mac_driver_name = "hmac-sha512-ccree",
		.blocksize = SHA512_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA512,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "md5",
		.driver_name = "md5-ccree",
		.mac_name = "hmac(md5)",
		.mac_driver_name = "hmac-md5-ccree",
		.blocksize = MD5_HMAC_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_MD5,
		.hw_mode = DRV_HASH_HW_MD5,
		.inter_digestsize = MD5_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sm3",
		.driver_name = "sm3-ccree",
		.blocksize = SM3_BLOCK_SIZE,
		.is_mac = false,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SM3_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SM3,
		.hw_mode = DRV_HASH_HW_SM3,
		.inter_digestsize = SM3_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_713,
		.std_body = CC_STD_OSCCA,
	},
	{
		.mac_name = "xcbc(aes)",
		.mac_driver_name = "xcbc-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_mac_update,
			.final = cc_mac_final,
			.finup = cc_mac_finup,
			.digest = cc_mac_digest,
			.setkey = cc_xcbc_setkey,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_XCBC_MAC,
		.inter_digestsize = AES_BLOCK_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.mac_name = "cmac(aes)",
		.mac_driver_name = "cmac-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_mac_update,
			.final = cc_mac_final,
			.finup = cc_mac_finup,
			.digest = cc_mac_digest,
			.setkey = cc_cmac_setkey,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_CMAC,
		.inter_digestsize = AES_BLOCK_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
};

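/*
 * Allocate and fill a cc_hash_alg from a template, using either the hash
 * names or the keyed (HMAC/MAC) names. For the non-keyed variant the
 * setkey hook is cleared.
 */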
static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
					     struct device *dev, bool keyed)
{
	struct cc_hash_alg *t_crypto_alg;
	struct crypto_alg *alg;
	struct ahash_alg *halg;

	t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
	if (!t_crypto_alg)
		return ERR_PTR(-ENOMEM);

	t_crypto_alg->ahash_alg = template->template_ahash;
	halg = &t_crypto_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_driver_name);
	} else {
		halg->setkey = NULL;
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
	alg->cra_priority = CC_CRA_PRIO;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_exit = cc_cra_exit;

	alg->cra_init = cc_cra_init;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_crypto_alg->hash_mode = template->hash_mode;
	t_crypto_alg->hw_mode = template->hw_mode;
	t_crypto_alg->inter_digestsize = template->inter_digestsize;

	return t_crypto_alg;
}

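/*
 * Copy one constant table into SRAM at *sram_buff_ofs and advance the
 * offset by its size, so successive calls lay the tables out back to back.
 */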
static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data,
			     unsigned int size, u32 *sram_buff_ofs)
{
	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
	unsigned int larval_seq_len = 0;
	int rc;

	cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
			 larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (rc)
		return rc;

	*sram_buff_ofs += size;
	return 0;
}

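/*
 * Populate SRAM with the constant tables. The resulting layout, which
 * cc_larval_digest_addr() and cc_digest_len_addr() rely on, is:
 *
 *   digest-len | [digest-len-sha512] | md5 | sha1 | sha224 | sha256 |
 *   [sm3] | [sha384 | sha512]
 *
 * where the bracketed entries are present only on HW revisions that
 * support them.
 */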
int cc_init_hash_sram(struct cc_drvdata *drvdata)
{
	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
	u32 sram_buff_ofs = hash_handle->digest_len_sram_addr;
	bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
	bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
	int rc = 0;

	/* Copy-to-sram digest-len */
	rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
			       sizeof(cc_digest_len_init), &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	if (large_sha_supported) {
		/* Copy-to-sram digest-len for sha384/512 */
		rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
				       sizeof(cc_digest_len_sha512_init),
				       &sram_buff_ofs);
		if (rc)
			goto init_digest_const_err;
	}

	/* The initial digests offset */
	hash_handle->larval_digest_sram_addr = sram_buff_ofs;

	/* Copy-to-sram initial SHA* digests */
	rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
			       &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
			       &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
			       &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
			       &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	if (sm3_supported) {
		rc = cc_init_copy_sram(drvdata, cc_sm3_init,
				       sizeof(cc_sm3_init), &sram_buff_ofs);
		if (rc)
			goto init_digest_const_err;
	}

	if (large_sha_supported) {
		rc = cc_init_copy_sram(drvdata, cc_sha384_init,
				       sizeof(cc_sha384_init), &sram_buff_ofs);
		if (rc)
			goto init_digest_const_err;

		rc = cc_init_copy_sram(drvdata, cc_sha512_init,
				       sizeof(cc_sha512_init), &sram_buff_ofs);
		if (rc)
			goto init_digest_const_err;
	}

init_digest_const_err:
	return rc;
}

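/*
 * Allocate the driver hash context: reserve SRAM for the constant tables,
 * load them, and register every template whose HW revision and standards
 * body requirements are met (both the MAC and plain-hash variants where
 * applicable).
 */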
int cc_hash_alloc(struct cc_drvdata *drvdata)
{
	struct cc_hash_handle *hash_handle;
	u32 sram_buff;
	u32 sram_size_to_alloc;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;
	int alg;

	hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
	if (!hash_handle)
		return -ENOMEM;

	INIT_LIST_HEAD(&hash_handle->hash_list);
	drvdata->hash_handle = hash_handle;

	sram_size_to_alloc = sizeof(cc_digest_len_init) +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init);

	if (drvdata->hw_rev >= CC_HW_REV_713)
		sram_size_to_alloc += sizeof(cc_sm3_init);

	if (drvdata->hw_rev >= CC_HW_REV_712)
		sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
			sizeof(cc_sha384_init) + sizeof(cc_sha512_init);

	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
	if (sram_buff == NULL_SRAM_ADDR) {
		rc = -ENOMEM;
		goto fail;
	}

	/* The initial digest-len offset */
	hash_handle->digest_len_sram_addr = sram_buff;

	/* Must be set before the alg registration, as it is used there */
	rc = cc_init_hash_sram(drvdata);
	if (rc) {
		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
		goto fail;
	}

	/* ahash registration */
	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
		struct cc_hash_alg *t_alg;
		int hw_mode = driver_hash[alg].hw_mode;

		/* Check that the HW revision and variants are suitable */
		if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
		    !(drvdata->std_bodies & driver_hash[alg].std_body))
			continue;

		if (driver_hash[alg].is_mac) {
			/* register hmac version */
			t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
			if (IS_ERR(t_alg)) {
				rc = PTR_ERR(t_alg);
				dev_err(dev, "%s alg allocation failed\n",
					driver_hash[alg].driver_name);
				goto fail;
			}
			t_alg->drvdata = drvdata;

			rc = crypto_register_ahash(&t_alg->ahash_alg);
			if (rc) {
				dev_err(dev, "%s alg registration failed\n",
					driver_hash[alg].driver_name);
				kfree(t_alg);
				goto fail;
			} else {
				list_add_tail(&t_alg->entry,
					      &hash_handle->hash_list);
			}
		}
		if (hw_mode == DRV_CIPHER_XCBC_MAC ||
		    hw_mode == DRV_CIPHER_CMAC)
			continue;

		/* register hash version */
		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				driver_hash[alg].driver_name);
			goto fail;
		}
		t_alg->drvdata = drvdata;

		rc = crypto_register_ahash(&t_alg->ahash_alg);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
				driver_hash[alg].driver_name);
			kfree(t_alg);
			goto fail;
		} else {
			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
		}
	}

	return 0;

fail:
	kfree(drvdata->hash_handle);
	drvdata->hash_handle = NULL;
	return rc;
}

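/*
 * Unregister every previously registered hash/MAC algorithm and release
 * the driver hash context.
 */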
int cc_hash_free(struct cc_drvdata *drvdata)
{
	struct cc_hash_alg *t_hash_alg, *hash_n;
	struct cc_hash_handle *hash_handle = drvdata->hash_handle;

	if (hash_handle) {
		list_for_each_entry_safe(t_hash_alg, hash_n,
					 &hash_handle->hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}

		kfree(hash_handle);
		drvdata->hash_handle = NULL;
	}
	return 0;
}

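/*
 * Build the descriptor sequence that loads the three AES-XCBC-MAC derived
 * keys (K1 as the cipher key, K2/K3 as state) followed by the current MAC
 * state, leaving the engine ready to consume data.
 */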
static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Setup XCBC MAC K1 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
					    XCBC_MAC_K1_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K2 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K3 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Loading MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;
	*seq_size = idx;
}

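/*
 * Build the descriptor sequence that loads the CMAC key and the current
 * MAC state. For AES-192 the full AES_MAX_KEY_SIZE key buffer is DMA'd
 * while the real key size is programmed separately.
 */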
static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Setup CMAC Key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
		      ctx->key_params.keylen), NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Load MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;
	*seq_size = idx;
}

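/*
 * Append the data-processing descriptors for the current request: a single
 * DLLI descriptor when the data is contiguous, or a BYPASS descriptor that
 * stages the MLLI table in SRAM followed by an MLLI descriptor otherwise.
 * NULL-buffer requests add nothing.
 */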
static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
			struct cc_hash_ctx *ctx, unsigned int flow_mode,
			struct cc_hw_desc desc[], bool is_not_last_data,
			unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     sg_dma_address(areq_ctx->curr_sg),
			     areq_ctx->curr_sg->length, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		idx++;
	} else {
		if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			dev_dbg(dev, " NULL mode\n");
			/* nothing to build */
			return;
		}
		/* bypass */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     areq_ctx->mlli_params.mlli_dma_addr,
			     areq_ctx->mlli_params.mlli_len, NS_BIT);
		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
			      areq_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[idx], BYPASS);
		idx++;
		/* process */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI,
			     ctx->drvdata->mlli_sram_addr,
			     areq_ctx->mlli_nents, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		idx++;
	}
	if (is_not_last_data)
		set_din_not_last_indication(&desc[(idx - 1)]);
	/* return updated desc sequence size */
	*seq_size = idx;
}

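/*
 * Return the constant initial (larval) digest for the given hash mode,
 * falling back to the MD5 value on an invalid mode to avoid returning a
 * NULL pointer.
 */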
static const void *cc_larval_digest(struct device *dev, u32 mode)
{
	switch (mode) {
	case DRV_HASH_MD5:
		return cc_md5_init;
	case DRV_HASH_SHA1:
		return cc_sha1_init;
	case DRV_HASH_SHA224:
		return cc_sha224_init;
	case DRV_HASH_SHA256:
		return cc_sha256_init;
	case DRV_HASH_SHA384:
		return cc_sha384_init;
	case DRV_HASH_SHA512:
		return cc_sha512_init;
	case DRV_HASH_SM3:
		return cc_sm3_init;
	default:
		dev_err(dev, "Invalid hash mode (%d)\n", mode);
		return cc_md5_init;
	}
}

/*!
 * Gets the address of the initial digest in SRAM
 * according to the given hash mode
 *
 * \param drvdata
 * \param mode The Hash mode. Supported modes:
 *             MD5/SHA1/SHA224/SHA256/SHA384/SHA512/SM3
 *
 * \return u32 The address of the initial digest in SRAM
 */
u32 cc_larval_digest_addr(void *drvdata, u32 mode)
{
	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
	struct device *dev = drvdata_to_dev(_drvdata);
	bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
	u32 addr;

	switch (mode) {
	case DRV_HASH_NULL:
		break; /* Ignore */
	case DRV_HASH_MD5:
		return (hash_handle->larval_digest_sram_addr);
	case DRV_HASH_SHA1:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init));
	case DRV_HASH_SHA224:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init));
	case DRV_HASH_SHA256:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init));
	case DRV_HASH_SM3:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init));
	case DRV_HASH_SHA384:
		addr = (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init));
		if (sm3_supported)
			addr += sizeof(cc_sm3_init);
		return addr;
	case DRV_HASH_SHA512:
		addr = (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init) +
			sizeof(cc_sha384_init));
		if (sm3_supported)
			addr += sizeof(cc_sm3_init);
		return addr;
	default:
		dev_err(dev, "Invalid hash mode (%d)\n", mode);
	}

	/* A valid, if wrong, address to avoid a kernel crash */
	return hash_handle->larval_digest_sram_addr;
}

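/*
 * Return the SRAM address of the digest-length constant for the given hash
 * mode: SHA-384/512 use a second constant stored right after the default
 * one.
 */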
u32 cc_digest_len_addr(void *drvdata, u32 mode)
{
	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
	u32 digest_len_addr = hash_handle->digest_len_sram_addr;

	switch (mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA224:
	case DRV_HASH_SHA256:
	case DRV_HASH_MD5:
		return digest_len_addr;
	case DRV_HASH_SHA384:
	case DRV_HASH_SHA512:
		return digest_len_addr + sizeof(cc_digest_len_init);
	default:
		return digest_len_addr; /* to avoid kernel crash */
	}
}