// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

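/*
 * Descriptor lengths: two command words (the descriptor header plus, going by
 * the init_rsa_*_desc() helpers in pkc_desc.c, a single protocol operation
 * command) followed by the form-specific Protocol Data Block.
 */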
#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f3_pdb))
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

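/*
 * All DMA mappings below are per-request: rsa_io_unmap() tears down the
 * input/output and S/G-table mappings, while the rsa_*_unmap() variants
 * additionally release the form-specific key material mapped by the
 * matching set_rsa_*_pdb() helper.
 */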
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f3_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

/**
 * caam_rsa_count_leading_zeros() - Count the leading zero bytes of a
 * scatterlist, i.e. how many bytes can be stripped from the input.
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of zero bytes to strip
 * @flags : operation flags for the sg mapping iterator
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}
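
/*
 * The hardware expects an RSA input no longer than the modulus:
 * rsa_edesc_alloc() below therefore strips leading zero bytes from longer
 * inputs (using caam_rsa_count_leading_zeros() above) and left-pads shorter
 * ones with zeros taken from zero_buffer via the hardware S/G table.
 */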

static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is shorter than the key modulus n,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (!diff_size && src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;
	/*
	 * Pad the S/G entry count; pad_sg_nents() rounds it up, presumably
	 * to the engine's S/G fetch granularity (see desc_constr.h).
	 */
	if (dst_nents > 1)
		sec4_sg_len += pad_sg_nents(dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}
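
/*
 * The set_rsa_*_pdb() helpers below populate the form-specific Protocol Data
 * Block: they DMA-map the key material and point the input/output entries
 * either directly at a single mapped segment or at the sec4 link table built
 * in rsa_edesc_alloc(), signalling the latter through the RSA_PDB_SGF_* /
 * RSA_PRIV_PDB_SGF_* bits.
 */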

static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

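/*
 * Each request entry point below follows the same pattern: allocate an
 * extended descriptor, fill in the form-specific PDB, build the job
 * descriptor and enqueue it on the job ring; -EINPROGRESS is returned and
 * the matching rsa_*_done() callback unmaps everything and completes the
 * request.
 */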
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

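/*
 * Dispatch on the private key form cached at setkey time: FORM3 (CRT) when
 * all of dP, dQ and qInv were usable, FORM2 when only p, q and the scratch
 * buffers were, FORM1 otherwise.
 */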
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv may decode to fewer bytes than the corresponding p or q
 * prime, since BER encoding uses the minimum number of bytes for an integer.
 * The decoded values therefore have to be zero-padded back to the prime
 * length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

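/*
 * The 4096-bit limit below matches CAAM_RSA_MAX_INPUT_SIZE (512 bytes),
 * which sizes the zero-padding buffer.
 */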
static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

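/*
 * Cache the private key components (p, q, the CRT members and the tmp
 * scratch buffers used by the hardware) and record which private key form
 * they enable: FORM2 once p/q and the temporaries are in place, FORM3 once
 * dP/dQ/qInv are as well.
 */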
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kzfree(rsa_key->dq);
free_dp:
	kzfree(rsa_key->dp);
free_tmp2:
	kzfree(rsa_key->tmp2);
free_tmp1:
	kzfree(rsa_key->tmp1);
free_q:
	kzfree(rsa_key->q);
free_p:
	kzfree(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();
	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct akcipher_alg caam_rsa = {
	.encrypt = caam_rsa_enc,
	.decrypt = caam_rsa_dec,
	.set_pub_key = caam_rsa_set_pub_key,
	.set_priv_key = caam_rsa_set_priv_key,
	.max_size = caam_rsa_max_size,
	.init = caam_rsa_init_tfm,
	.exit = caam_rsa_exit_tfm,
	.reqsize = sizeof(struct caam_rsa_req_ctx),
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-caam",
		.cra_priority = 3000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
	},
};
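
/*
 * For reference, a kernel user reaches this implementation through the
 * generic akcipher API; a minimal sketch (error handling and the async
 * completion callback elided, pub_key being a hypothetical DER-encoded
 * RsaPubKey buffer) might look like:
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *req = akcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_akcipher_set_pub_key(tfm, pub_key, pub_key_len);
 *	akcipher_request_set_crypt(req, src_sg, dst_sg, src_len,
 *				   crypto_akcipher_maxsize(tfm));
 *	crypto_akcipher_encrypt(req);	// returns -EINPROGRESS here
 *
 * The crypto core prefers "rsa-caam" over the software "rsa" implementation
 * based on its cra_priority of 3000.
 */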

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst;
	int err;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10)
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	else
		pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa);
	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	} else {
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	kfree(zero_buffer);
	crypto_unregister_akcipher(&caam_rsa);
}