// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64

/*
 * Null hashes to align with hardware behavior on i.MX6SL and ULL;
 * these are byte-reversed for consistency with the hardware output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

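/*
 * These are the standard SHA-1/SHA-256 digests of the empty message with
 * the byte order reversed: SHA-1("") is
 * da39a3ee5e6b4b0d3255bfef95601890afd80709, which read back to front
 * yields the sha1_null_hash bytes above, and likewise for SHA-256("")
 * and sha256_null_hash.
 */
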
/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};
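
/*
 * Note: next_cmd_addr would allow descriptors to be chained in memory,
 * but this driver always submits a single descriptor per operation and
 * sets the field to 0 (see mxs_dcp_run_aes() and mxs_dcp_run_sha()).
 */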

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex	mutex;
	uint32_t	alg;
	unsigned int	hot:1;

	/* Crypto-specific context */
	struct crypto_skcipher	*fallback;
	unsigned int		key_len;
	uint8_t			key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
	struct skcipher_request fallback_req;	// keep at the end
};
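
/*
 * fallback_req must remain the last member: mxs_dcp_aes_fallback_init_tfm()
 * sets the request size to sizeof(struct dcp_aes_req_ctx) plus the
 * fallback's own reqsize, so the fallback's request context lives directly
 * past the end of this structure.
 */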

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx	req_ctx;
	struct dcp_async_ctx	async_ctx;
};
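
/*
 * dcp_sha_export()/dcp_sha_import() below serialize a partial hash into
 * this structure, which is why .statesize is set to its size. A minimal
 * sketch of a caller saving and restoring in-flight SHA state
 * (hypothetical buffer name, standard crypto API calls):
 *
 *	u8 state[sizeof(struct dcp_export_state)];
 *
 *	crypto_ahash_export(req, state);
 *	...
 *	crypto_ahash_import(req, state);
 */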
121
Marek Vasut15b59e72013-12-10 20:26:21 +0100122/*
123 * There can even be only one instance of the MXS DCP due to the
124 * design of Linux Crypto API.
125 */
126static struct dcp *global_sdcp;
Marek Vasut15b59e72013-12-10 20:26:21 +0100127
/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

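/*
 * Example composition (mirrors mxs_dcp_run_aes() below): the first
 * descriptor of an AES-128-CBC encryption sets up its control words as
 *
 *	control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 *		   MXS_DCP_CONTROL0_INTERRUPT |
 *		   MXS_DCP_CONTROL0_ENABLE_CIPHER |
 *		   MXS_DCP_CONTROL0_PAYLOAD_KEY |
 *		   MXS_DCP_CONTROL0_CIPHER_ENCRYPT |
 *		   MXS_DCP_CONTROL0_CIPHER_INIT;
 *	control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 |
 *		   MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
 */
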
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	int dma_err;
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
	if (dma_err)
		return dma_err;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));

	/* Unmap the descriptor even on failure, so it is never leaked. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	return 0;
}
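
/*
 * Both submit paths below follow the same pattern (a sketch, eliding the
 * mode-specific bits): fill the channel's descriptor in the coherent
 * block, then kick the DMA and wait:
 *
 *	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 *
 *	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | ...;
 *	desc->source = src_phys;
 *	desc->size = actx->fill;
 *	...
 *	ret = mxs_dcp_start_dma(actx);
 */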

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	dma_addr_t key_phys, src_phys, dst_phys;
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, key_phys);
	if (ret)
		return ret;

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_src;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_dst;

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);

	return ret;
}

static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	int dst_nents = sg_nents(dst);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint32_t dst_off = 0;
	uint8_t *src_buf = NULL;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	unsigned int i, len, clen, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, sg_nents(src), i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
						     actx->fill, dst_off);
				dst_off += actx->fill;
				last_out_len = actx->fill;
				actx->fill = 0;
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the last block back into the IV for CBC chaining. */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}

static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		ret = crypto_skcipher_decrypt(&rctx->fallback_req);

	return ret;
}

static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES-128 is supported by the hardware, so store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(actx->fallback, key, len);
}
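
/*
 * Resulting flow for an unsupported key size (sketch): a 256-bit key is
 * stored only in the fallback tfm and actx->key_len stays at 32, so every
 * subsequent request fails the AES_KEYSIZE_128 check in
 * mxs_dcp_aes_enqueue() above and is handed to mxs_dcp_block_fallback().
 */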

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
					 crypto_skcipher_reqsize(blk));
	return 0;
}

static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	ret = dma_mapping_error(sdcp->dev, buf_phys);
	if (ret)
		return ret;

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		ret = dma_mapping_error(sdcp->dev, digest_phys);
		if (ret)
			goto done_run;

		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;

	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
			rctx->init = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* The hardware returns the digest byte-reversed; flip it back. */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	},
};
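
/*
 * A minimal usage sketch (standard crypto API, not part of this driver):
 * once registered, these back the generic "ecb(aes)"/"cbc(aes)" names,
 * and their priority of 400 lets them be preferred over the generic
 * software implementations:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	// crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)) should
 *	// report "cbc-aes-dcp" when this implementation was selected.
 */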

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
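
/*
 * As with the AES algorithms above, hashing users reach these through the
 * generic names (sketch, standard crypto API):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *
 * followed by the usual ahash init/update/final calls, which land in
 * dcp_sha_init()/dcp_sha_update()/dcp_sha_final() here.
 */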

static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* The DCP clock is optional, only used on some SoCs. */
	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk)) {
		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
			return PTR_ERR(sdcp->dcp_clk);
		sdcp->dcp_clk = NULL;
	}
	ret = clk_prepare_enable(sdcp->dcp_clk);
	if (ret)
		return ret;

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		goto err_disable_unprepare_clk;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_disable_unprepare_clk;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_disable_unprepare_clk:
	clk_disable_unprepare(sdcp->dcp_clk);

	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	clk_disable_unprepare(sdcp->dcp_clk);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");