// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
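
/*
 * Usage sketch (illustrative only, not part of this driver): kernel
 * consumers reach this implementation through the generic akcipher API.
 * The buffer names (der_key, src_sg, dst_sg) are hypothetical and error
 * handling is omitted; an asynchronous request completes through the
 * callback installed with akcipher_request_set_callback() and returns
 * -EINPROGRESS here.
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *req = akcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_akcipher_set_pub_key(tfm, der_key, der_key_len);
 *	akcipher_request_set_crypt(req, src_sg, dst_sg, src_len, dst_len);
 *	ret = crypto_akcipher_encrypt(req);
 */
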
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, ecode);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, ecode);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, ecode);
}

static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f3_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - Count the leading zero bytes of a
 * scatterlist, so the caller knows how many bytes to strip.
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of (zero) bytes to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than the given number of bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}
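
/*
 * A minimal linear-buffer sketch of the same stripping logic (illustrative
 * only; the function above needs sg_mapping_iter because req->src may span
 * several non-contiguous pages):
 *
 *	static int count_leading_zeros_buf(const u8 *buf, unsigned int nbytes)
 *	{
 *		int lzeros = 0;
 *
 *		while (nbytes-- && !*buf++)
 *			lzeros++;
 *		return lzeros;
 *	}
 */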

static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is less than the n key modulus,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}
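
	/*
	 * Note: for well-formed inputs (src no wider than the modulus once
	 * leading zeros are stripped), diff_size + req_ctx->fixup_src_len ==
	 * key->n_sz, i.e. the zero padding plus the fixed-up source exactly
	 * fill the modulus-sized operand the hardware expects.
	 */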

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (!diff_size && src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;
	if (dst_nents > 1)
		sec4_sg_len += pad_sg_nents(dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}

static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

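/*
 * Dispatch on the private key form (CAAM protocol terminology): form 1
 * operates on (n, d), form 2 on (p, q, d), and form 3 on the CRT
 * quintuple (p, q, dP, dQ, qInv). The form was chosen at setkey time by
 * caam_rsa_set_priv_key_form(), based on which key components could be
 * decoded.
 */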
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv may decode to fewer bytes than the corresponding p, q
 * length, since BER encoding requires that an integer be encoded in the
 * minimum number of bytes. The decoded dP, dQ, qInv values therefore have
 * to be zero-padded to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}
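
/*
 * Worked example (illustrative): for a 2048-bit key, p and q are 128 bytes
 * each. If dP BER-decodes to 126 bytes, caam_read_rsa_crt() returns a
 * 128-byte zeroed buffer with those 126 bytes copied at offset 2, so the
 * hardware always sees fixed-width, left-zero-padded operands.
 */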

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

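/*
 * @len is the modulus size in bits: callers pass raw_key.n_sz << 3.
 * Keys larger than 4096 bits (512 bytes, cf. CAAM_RSA_MAX_INPUT_SIZE)
 * are rejected.
 */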
static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kzfree(rsa_key->dq);
free_dp:
	kzfree(rsa_key->dp);
free_tmp2:
	kzfree(rsa_key->tmp2);
free_tmp1:
	kzfree(rsa_key->tmp1);
free_q:
	kzfree(rsa_key->q);
free_p:
	kzfree(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.reqsize = sizeof(struct caam_rsa_req_ctx),
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx),
		},
	}
};

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst;
	int err;

	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10)
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	else
		pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);

	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}