// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define SAHARA_CMD_RESET	(1 << 0)
#define SAHARA_CMD_CLEAR_INT	(1 << 8)
#define SAHARA_CMD_CLEAR_ERR	(1 << 9)
#define SAHARA_CMD_SINGLE_STEP	(1 << 10)
#define SAHARA_CMD_MODE_BATCH	(1 << 16)
#define SAHARA_CMD_MODE_DEBUG	(1 << 18)
#define SAHARA_REG_STATUS	0x10
#define SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define SAHARA_STATE_IDLE	0
#define SAHARA_STATE_BUSY	1
#define SAHARA_STATE_ERR	2
#define SAHARA_STATE_FAULT	3
#define SAHARA_STATE_COMPLETE	4
#define SAHARA_STATE_COMP_FLAG	(1 << 2)
#define SAHARA_STATUS_DAR_FULL	(1 << 3)
#define SAHARA_STATUS_ERROR	(1 << 4)
#define SAHARA_STATUS_SECURE	(1 << 5)
#define SAHARA_STATUS_FAIL	(1 << 6)
#define SAHARA_STATUS_INIT	(1 << 7)
#define SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define SAHARA_ERRSOURCE_CHA	14
#define SAHARA_ERRSOURCE_DMA	15
#define SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20
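
/*
 * Job layout as this driver uses it (the i.MX reference manual is the
 * authoritative description): the DMA address of the first descriptor is
 * written to the DAR register and the accelerator follows ->next until it
 * reads a null pointer.  p1/p2 point either at a plain buffer (key, IV,
 * hash context) or at a chain of sahara_hw_link entries describing a
 * scatter/gather list, e.g. for an AES-CBC request with a fresh key:
 *
 *   DAR -> desc[0]  FORM_KEY:  p1 = IV, p2 = key
 *          desc[1]  FORM_DATA: p1 -> input links, p2 -> output links
 *                   link[0] -> link[1] -> ... -> NULL
 *
 * All of these live in DMA-coherent memory allocated once at probe time.
 */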
struct sahara_hw_desc {
	u32	hdr;
	u32	len1;
	u32	p1;
	u32	len2;
	u32	p2;
	u32	next;
};

struct sahara_hw_link {
	u32	len;
	u32	p;
	u32	next;
};

struct sahara_ctx {
	unsigned long flags;

	/* AES-specific context */
	int keylen;
	u8 key[AES_KEYSIZE_128];
	struct crypto_sync_skcipher *fallback;
};

struct sahara_aes_reqctx {
	unsigned long mode;
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	size_t			total;
	unsigned int		last;
	unsigned int		first;
	unsigned int		active;
};

struct sahara_dev {
	struct device		*device;
	unsigned int		version;
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;
	struct task_struct	*kthread;
	struct completion	dma_completion;

	struct sahara_ctx	*ctx;
	struct crypto_queue	queue;
	unsigned long		flags;

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;
	struct scatterlist	*in_sg;
	int			nb_in_sg;
	struct scatterlist	*out_sg;
	int			nb_out_sg;

	u32			error;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}

static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
		  SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
		  SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
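
	/*
	 * The base header above has an odd number of bits set, including
	 * SAHARA_HDR_PARITY_BIT.  Each single mode bit ORed in below is
	 * paired with a parity-bit toggle so the header keeps odd parity,
	 * the rule that sahara_sha_init_hdr() computes explicitly.
	 */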
	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
	       SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

static const char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static const char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static const char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

static const char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, " * DMA read.\n");
		else
			dev_err(dev->device, " * DMA write.\n");

		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, " * %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, " * %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}

static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!__is_defined(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, " - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, " * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, " - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, " - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, " - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, " - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, " - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, " - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, " - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, " - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, " - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, " - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, " - Debug Mode.\n");

	dev_dbg(dev->device, " - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!__is_defined(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
			i, &dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!__is_defined(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (%pad):\n",
			i, &dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;
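
	/*
	 * A request uses at most two chained descriptors: an optional
	 * key/IV descriptor, built only when the key changed since the
	 * last request, followed by the data descriptor.  idx tracks
	 * where the data descriptor lands when the key one is skipped.
	 */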
	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[idx]->len1 = 0;
			dev->hw_desc[idx]->p1 = 0;
		}
		dev->hw_desc[idx]->len2 = ctx->keylen;
		dev->hw_desc[idx]->p2 = dev->key_phys_base;
		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

		idx++;
	}

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid numbers of src SG.\n");
		return dev->nb_in_sg;
	}
	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
	if (dev->nb_out_sg < 0) {
		dev_err(dev->device, "Invalid numbers of dst SG.\n");
		return dev->nb_out_sg;
	}
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		goto unmap_in;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_out;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_out:
	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return -EINVAL;
}

static int sahara_aes_process(struct ablkcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return 0;
}

static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	ctx->keylen = keylen;

	/* SAHARA only supports 128bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
						      CRYPTO_TFM_REQ_MASK);

	ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
			       CRYPTO_TFM_RES_MASK;
	return ret;
}

static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}

static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

	return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(ctx->fallback);
}

static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			       struct sahara_sha_reqctx *rctx)
{
	u32 hdr = 0;

	hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;
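
	/*
	 * Descriptor headers must have odd parity over the full 32-bit
	 * word; unlike the AES path, which toggles the parity bit per
	 * mode bit, compute it here in one shot.
	 */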
	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}

static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid numbers of src SG.\n");
		return dev->nb_in_sg;
	}
	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	sg = dev->in_sg;
	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
	if (!ret)
		return -EFAULT;

	for (i = start; i < dev->nb_in_sg + start; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg + start - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	return i;
}

static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8*/
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						   struct sahara_sha_reqctx *rctx,
						   struct ahash_request *req,
						   int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}
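
/*
 * Shorten the scatterlist so that it covers exactly nbytes: the entry in
 * which the byte count runs out is trimmed and marked as the list end.
 * The (sole) caller ignores the return value.
 */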
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return nbytes;
}
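
/*
 * The MDHA unit pads only the final transfer, so every intermediate
 * update must be a whole number of blocks.  Input shorter than a block
 * is parked in rctx->buf; once a block boundary is reached, previously
 * buffered bytes are staged in rctx->rembuf and chained in front of the
 * request's scatterlist, and any tail that does not fill a block is
 * carried over to the next call.  Returns 0 when everything was buffered
 * and there is nothing to hash yet, -EINPROGRESS once a transfer has
 * been set up.
 */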
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too few data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}

static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long timeout;

	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	if (rctx->sg_in_idx)
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}

static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "sahara-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_ecb_encrypt,
		.decrypt	= sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "sahara-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_cbc_encrypt,
		.decrypt	= sahara_aes_cbc_decrypt,
	}
}
};
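
/*
 * Once registered, the ciphers above are reachable from userspace via
 * the kernel's AF_ALG socket interface (assumes
 * CONFIG_CRYPTO_USER_API_SKCIPHER; needs <sys/socket.h> and
 * <linux/if_alg.h>).  A minimal sketch of one 16-byte "cbc(aes)"
 * encryption (error handling omitted; key/iv/pt/ct are the caller's
 * buffers, not part of this driver):
 *
 *	int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "skcipher",
 *		.salg_name   = "cbc(aes)",	// e.g. served by sahara-cbc-aes
 *	};
 *	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, 16);
 *	int op = accept(tfm, NULL, 0);
 *
 *	// direction and IV travel as ancillary data on each request
 *	char cbuf[CMSG_SPACE(sizeof(__u32)) +
 *		  CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = { 0 };
 *	struct iovec iov = { .iov_base = pt, .iov_len = 16 };
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf),
 *			      .msg_iov = &iov, .msg_iovlen = 1 };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *	c->cmsg_level = SOL_ALG;
 *	c->cmsg_type  = ALG_SET_OP;
 *	c->cmsg_len   = CMSG_LEN(sizeof(__u32));
 *	*(__u32 *)CMSG_DATA(c) = ALG_OP_ENCRYPT;
 *	c = CMSG_NXTHDR(&msg, c);
 *	c->cmsg_level = SOL_ALG;
 *	c->cmsg_type  = ALG_SET_IV;
 *	c->cmsg_len   = CMSG_LEN(sizeof(struct af_alg_iv) + 16);
 *	((struct af_alg_iv *)CMSG_DATA(c))->ivlen = 16;
 *	memcpy(((struct af_alg_iv *)CMSG_DATA(c))->iv, iv, 16);
 *
 *	sendmsg(op, &msg, 0);
 *	read(op, ct, 16);
 */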

static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};

static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};

static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}

static const struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static const struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		return irq;
	}

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
					    &dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		return -ENOMEM;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		return -ENOMEM;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		return -ENOMEM;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread))
		return PTR_ERR(dev->kthread);

	init_completion(&dev->dma_completion);

	err = clk_prepare_enable(dev->clk_ipg);
	if (err)
		return err;
	err = clk_prepare_enable(dev->clk_ahb);
	if (err)
		goto clk_ipg_disable;

	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);
	dev_ptr = NULL;
	clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
	clk_disable_unprepare(dev->clk_ipg);

	return err;
}

static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}

static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");