/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

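/*
 * Each RSA job descriptor below is built as a header command plus a single
 * PROTOCOL operation command (hence 2 * CAAM_CMD_SZ), with the protocol
 * data block (PDB) sitting in between.
 */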
#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f2_pdb))

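/*
 * Undo the DMA mappings set up by rsa_edesc_alloc(): the source and
 * destination scatterlists, plus the SEC4 S/G table when one was built.
 */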
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

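/*
 * Allocate the extended descriptor as a single GFP_DMA buffer holding the
 * driver-private rsa_edesc header, the hardware job descriptor and any
 * SEC4 link tables. The source and destination scatterlists are DMA mapped
 * here; note that sg_nents_for_len() is assumed to succeed for well-formed
 * requests, as its return value is not error-checked.
 */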
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
			CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;

	src_nents = sg_nents_for_len(req->src, req->src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (src_nents > 1)
		sec4_sg_len = src_nents;
	if (dst_nents > 1)
		sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

	sec4_sg_index = 0;
	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}

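/*
 * Fill the RSA encryption PDB: DMA map the modulus n and the public
 * exponent e, then point f (input) and g (output) either at a single
 * segment or at the shared S/G table, setting the matching RSA_PDB_SGF_*
 * flags.
 */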
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req->src_len;

	return 0;
}

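/*
 * Private key form 1 operates on the pair (n, d): the PDB carries the
 * modulus and the private exponent, with the same single-segment/S-G
 * handling as the public PDB, but with the f/g roles swapped since g is
 * the input and f the output here.
 */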
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

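/*
 * Private key form 2 supplies d together with the prime factors p and q.
 * tmp1/tmp2 are scratch buffers of p_sz/q_sz bytes for the accelerator's
 * CRT computation; they are mapped bidirectionally since the hardware is
 * understood to both read and write them, and are released in
 * rsa_priv_f2_unmap().
 */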
static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

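/*
 * RSA public-key operation (encrypt/verify). On success the job is queued
 * on the ring and the request completes asynchronously via rsa_pub_done(),
 * so -EINPROGRESS is the expected return value.
 */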
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

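/*
 * RSA private-key operation (decrypt/sign): dispatch to form 1 or form 2
 * depending on which key components were available at set_priv_key time.
 */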
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

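/*
 * Zeroize and free all key material. The secret components (d, p, q and
 * the scratch buffers) go through kzfree(); n and e are public and only
 * need kfree().
 */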
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (*nbytes && !**ptr) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	u8 *val;

	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
	if (!val)
		return NULL;

	memcpy(val, buf, *nbytes);

	return val;
}

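/*
 * 'len' is the modulus size in bits (callers pass n_sz << 3); 4096-bit
 * keys are the largest accepted here, presumably matching the PKHA
 * accelerator's limit.
 */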
static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

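/*
 * Try to upgrade the key to private key form 2 by caching p and q plus two
 * scratch buffers. Any allocation failure simply leaves priv_form at FORM1,
 * so decryption still works, just without the faster CRT path.
 */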
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	return;

free_tmp1:
	kzfree(rsa_key->tmp1);
	rsa_key->tmp1 = NULL;
free_q:
	kzfree(rsa_key->q);
	rsa_key->q = NULL;
free_p:
	kzfree(rsa_key->p);
	rsa_key->p = NULL;
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	return (key->n) ? key->n_sz : -EINVAL;
}

/* Per-session PKC driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	return 0;
}

/* Per-session PKC driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

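/*
 * Raw ("textbook") RSA primitives: sign/verify reuse the same modular
 * exponentiation as decrypt/encrypt, with any padding scheme expected to be
 * layered on top by a wrapper such as the generic pkcs1pad template.
 */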
static struct akcipher_alg caam_rsa = {
	.encrypt = caam_rsa_enc,
	.decrypt = caam_rsa_dec,
	.sign = caam_rsa_dec,
	.verify = caam_rsa_enc,
	.set_pub_key = caam_rsa_set_pub_key,
	.set_priv_key = caam_rsa_set_priv_key,
	.max_size = caam_rsa_max_size,
	.init = caam_rsa_init_tfm,
	.exit = caam_rsa_exit_tfm,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-caam",
		.cra_priority = 3000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
	},
};

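/*
 * A minimal usage sketch (not part of this driver): a kernel user reaches
 * "rsa-caam" through the generic akcipher API, roughly as below. Error
 * handling is elided and the identifiers (key_der, my_done_cb, ...) are
 * only illustrative:
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *req;
 *
 *	crypto_akcipher_set_pub_key(tfm, key_der, key_der_len);
 *	req = akcipher_request_alloc(tfm, GFP_KERNEL);
 *	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      my_done_cb, my_ctx);
 *	akcipher_request_set_crypt(req, src_sgl, dst_sgl, src_len, dst_len);
 *	crypto_akcipher_encrypt(req); (typically returns -EINPROGRESS)
 */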
/* Public Key Cryptography module initialization handler */
static int __init caam_pkc_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	u32 cha_inst, pk_inst;
	int err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/* Determine public key hardware accelerator presence. */
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return -ENODEV;

	err = crypto_register_akcipher(&caam_rsa);
	if (err)
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	else
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

	return err;
}

static void __exit caam_pkc_exit(void)
{
	crypto_unregister_akcipher(&caam_rsa);
}

module_init(caam_pkc_init);
module_exit(caam_pkc_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor");