// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f3_pdb))

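/*
 * rsa_io_unmap() releases the input/output scatterlist mappings and the
 * sec4 S/G link table; the rsa_*_unmap() helpers below release the key
 * material mapped through each Protocol Data Block (PDB).
 */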
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f3_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

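/*
 * Count the leading zero bytes of the data described by a scatterlist.
 * The engine expects minimally-encoded positive integers, so the caller
 * strips this many bytes from the start of the request source.
 */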
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		while (len && !*buff) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

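/*
 * Allocate the extended descriptor: the rsa_edesc itself, the hardware job
 * descriptor commands and, for multi-entry src/dst scatterlists, the sec4
 * S/G link table. Leading zeros are stripped from the source data first.
 */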
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int lzeros;

	lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
	if (lzeros < 0)
		return ERR_PTR(lzeros);

	req->src_len -= lzeros;
	req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);

	src_nents = sg_nents_for_len(req->src, req->src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (src_nents > 1)
		sec4_sg_len = src_nents;

	if (dst_nents > 1)
		sec4_sg_len += pad_sg_nents(dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

	sec4_sg_index = 0;
	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}

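/*
 * Fill the Protocol Data Block for an RSA public-key operation: map the
 * modulus n and public exponent e, then point f (input) and g (output) at
 * either contiguous buffers or the shared sec4 S/G table.
 */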
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req->src_len;

	return 0;
}

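/*
 * Fill the PDB for a form 1 private-key operation (n, d). Note that f and g
 * swap roles relative to the public PDB: here g is the input and f the
 * output.
 */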
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

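/*
 * Fill the PDB for a form 2 private-key operation (d, p, q). tmp1 and tmp2
 * are scratch buffers, sized like p and q, that the engine requires while
 * running the protocol.
 */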
static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

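/*
 * Fill the PDB for a form 3 (CRT) private-key operation: p, q, dp, dq and
 * the coefficient qinv, plus the tmp1/tmp2 scratch buffers. The private
 * exponent d itself is not needed in this form.
 */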
static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

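/*
 * RSA encryption: set up the public-key PDB and job descriptor, then enqueue
 * the job; completion is reported asynchronously via rsa_pub_done().
 */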
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

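/*
 * RSA decryption dispatch: use the best private-key form prepared at setkey
 * time - FORM3 when the full CRT set is available, FORM2 with p and q,
 * FORM1 (n, d) otherwise.
 */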
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	/* check the remaining length before dereferencing the pointer */
	while (*nbytes && !**ptr) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv may decode to fewer bytes than the corresponding p or q
 * prime factor, since BER encoding uses the minimum number of bytes needed
 * to represent an integer. The decoded dP, dQ and qInv values therefore
 * have to be zero-padded to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

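/*
 * Parse a BER-encoded RSA public key and copy its components into
 * GFP_DMA buffers that can be handed directly to the CAAM engine.
 */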
static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

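/*
 * Choose the private-key form the engine will use. Try to assemble the full
 * CRT set (FORM3: p, q, dp, dq, qinv); if a CRT member cannot be read, fall
 * back to FORM2 (p, q, d), and if even p or q is unavailable stay with
 * FORM1 (n, d). Failures here only cost performance, never correctness.
 */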
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		return;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

	/*
	 * A missing CRT member is not fatal: free and clear whatever was
	 * already read and keep using FORM2.
	 */
free_dq:
	kzfree(rsa_key->dq);
	rsa_key->dq = NULL;
free_dp:
	kzfree(rsa_key->dp);
	rsa_key->dp = NULL;
	return;

	/*
	 * Without p, q and the scratch buffers the key stays in FORM1; clear
	 * the freed pointers so caam_rsa_free_key() does not free them again.
	 */
free_tmp1:
	kzfree(rsa_key->tmp1);
	rsa_key->tmp1 = NULL;
free_q:
	kzfree(rsa_key->q);
	rsa_key->q = NULL;
free_p:
	kzfree(rsa_key->p);
	rsa_key->p = NULL;
}

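/*
 * Parse a BER-encoded RSA private key. n, e and d are mandatory; the
 * remaining components are picked up opportunistically by
 * caam_rsa_set_priv_key_form() to select the fastest usable private-key
 * form.
 */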
static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct akcipher_alg caam_rsa = {
	.encrypt = caam_rsa_enc,
	.decrypt = caam_rsa_dec,
	.set_pub_key = caam_rsa_set_pub_key,
	.set_priv_key = caam_rsa_set_priv_key,
	.max_size = caam_rsa_max_size,
	.init = caam_rsa_init_tfm,
	.exit = caam_rsa_exit_tfm,
	.reqsize = sizeof(struct caam_rsa_req_ctx),
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-caam",
		.cra_priority = 3000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
	},
};

/* Public Key Cryptography module initialization handler */
static int __init caam_pkc_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	u32 pk_inst;
	int err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv) {
		err = -ENODEV;
		goto out_put_dev;
	}

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10)
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	else
		pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst) {
		err = -ENODEV;
		goto out_put_dev;
	}

	err = crypto_register_akcipher(&caam_rsa);
	if (err)
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	else
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

out_put_dev:
	put_device(ctrldev);
	return err;
}

static void __exit caam_pkc_exit(void)
{
	crypto_unregister_akcipher(&caam_rsa);
}

module_init(caam_pkc_init);
module_exit(caam_pkc_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor");