blob: 2dbed8bb8d264b5c4109122f1203687a29b52860 [file] [log] [blame]
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001/*
2 * Cryptographic API.
3 *
4 * Support for ATMEL SHA1/SHA256 HW acceleration.
5 *
6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7 * Author: Nicolas Royer <nicolas@eukrea.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Some ideas are from omap-sham.c drivers.
14 */
15
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/clk.h>
22#include <linux/io.h>
23#include <linux/hw_random.h>
24#include <linux/platform_device.h>
25
26#include <linux/device.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020027#include <linux/init.h>
28#include <linux/errno.h>
29#include <linux/interrupt.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020030#include <linux/irq.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020031#include <linux/scatterlist.h>
32#include <linux/dma-mapping.h>
Nicolas Ferreabfe7ae2013-10-15 15:36:34 +020033#include <linux/of_device.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020034#include <linux/delay.h>
35#include <linux/crypto.h>
36#include <linux/cryptohash.h>
37#include <crypto/scatterwalk.h>
38#include <crypto/algapi.h>
39#include <crypto/sha.h>
40#include <crypto/hash.h>
41#include <crypto/internal/hash.h>
Nicolas Royerd4905b32013-02-20 17:10:26 +010042#include <linux/platform_data/crypto-atmel.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020043#include "atmel-sha-regs.h"
44
45/* SHA flags */
46#define SHA_FLAGS_BUSY BIT(0)
47#define SHA_FLAGS_FINAL BIT(1)
48#define SHA_FLAGS_DMA_ACTIVE BIT(2)
49#define SHA_FLAGS_OUTPUT_READY BIT(3)
50#define SHA_FLAGS_INIT BIT(4)
51#define SHA_FLAGS_CPU BIT(5)
52#define SHA_FLAGS_DMA_READY BIT(6)
53
54#define SHA_FLAGS_FINUP BIT(16)
55#define SHA_FLAGS_SG BIT(17)
Cyrille Pitchen7cee3502016-01-15 15:49:34 +010056#define SHA_FLAGS_ALGO_MASK GENMASK(22, 18)
Nicolas Royerebc82ef2012-07-01 19:19:46 +020057#define SHA_FLAGS_SHA1 BIT(18)
Nicolas Royerd4905b32013-02-20 17:10:26 +010058#define SHA_FLAGS_SHA224 BIT(19)
59#define SHA_FLAGS_SHA256 BIT(20)
60#define SHA_FLAGS_SHA384 BIT(21)
61#define SHA_FLAGS_SHA512 BIT(22)
62#define SHA_FLAGS_ERROR BIT(23)
63#define SHA_FLAGS_PAD BIT(24)
Cyrille Pitchen7cee3502016-01-15 15:49:34 +010064#define SHA_FLAGS_RESTORE BIT(25)
Nicolas Royerebc82ef2012-07-01 19:19:46 +020065
66#define SHA_OP_UPDATE 1
67#define SHA_OP_FINAL 2
68
Cyrille Pitchencc831d32016-01-29 17:04:02 +010069#define SHA_BUFFER_LEN (PAGE_SIZE / 16)
Nicolas Royerebc82ef2012-07-01 19:19:46 +020070
71#define ATMEL_SHA_DMA_THRESHOLD 56
72
Nicolas Royerd4905b32013-02-20 17:10:26 +010073struct atmel_sha_caps {
74 bool has_dma;
75 bool has_dualbuff;
76 bool has_sha224;
77 bool has_sha_384_512;
Cyrille Pitchen7cee3502016-01-15 15:49:34 +010078 bool has_uihv;
Nicolas Royerd4905b32013-02-20 17:10:26 +010079};
Nicolas Royerebc82ef2012-07-01 19:19:46 +020080
81struct atmel_sha_dev;
82
Cyrille Pitchencc831d32016-01-29 17:04:02 +010083/*
Cyrille Pitchen9c4274d2016-02-08 16:26:48 +010084 * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
Cyrille Pitchencc831d32016-01-29 17:04:02 +010085 * tested by the ahash_prepare_alg() function.
86 */
Nicolas Royerebc82ef2012-07-01 19:19:46 +020087struct atmel_sha_reqctx {
88 struct atmel_sha_dev *dd;
89 unsigned long flags;
90 unsigned long op;
91
Nicolas Royerd4905b32013-02-20 17:10:26 +010092 u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
93 u64 digcnt[2];
Nicolas Royerebc82ef2012-07-01 19:19:46 +020094 size_t bufcnt;
95 size_t buflen;
96 dma_addr_t dma_addr;
97
98 /* walk state */
99 struct scatterlist *sg;
100 unsigned int offset; /* offset in current sg */
101 unsigned int total; /* total request */
102
Nicolas Royerd4905b32013-02-20 17:10:26 +0100103 size_t block_size;
104
Cyrille Pitchen9c4274d2016-02-08 16:26:48 +0100105 u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200106};
107
Cyrille Pitchena29af932017-01-26 17:07:47 +0100108typedef int (*atmel_sha_fn_t)(struct atmel_sha_dev *);
109
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200110struct atmel_sha_ctx {
111 struct atmel_sha_dev *dd;
Cyrille Pitchena29af932017-01-26 17:07:47 +0100112 atmel_sha_fn_t start;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200113
114 unsigned long flags;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200115};
116
Nicolas Royerd4905b32013-02-20 17:10:26 +0100117#define ATMEL_SHA_QUEUE_LENGTH 50
118
119struct atmel_sha_dma {
120 struct dma_chan *chan;
121 struct dma_slave_config dma_conf;
122};
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200123
124struct atmel_sha_dev {
125 struct list_head list;
126 unsigned long phys_base;
127 struct device *dev;
128 struct clk *iclk;
129 int irq;
130 void __iomem *io_base;
131
132 spinlock_t lock;
133 int err;
134 struct tasklet_struct done_task;
Cyrille Pitchenf56809c2016-01-15 15:49:32 +0100135 struct tasklet_struct queue_task;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200136
137 unsigned long flags;
138 struct crypto_queue queue;
139 struct ahash_request *req;
Cyrille Pitchena29af932017-01-26 17:07:47 +0100140 bool is_async;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100141
142 struct atmel_sha_dma dma_lch_in;
143
144 struct atmel_sha_caps caps;
145
146 u32 hw_version;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200147};
148
149struct atmel_sha_drv {
150 struct list_head dev_list;
151 spinlock_t lock;
152};
153
154static struct atmel_sha_drv atmel_sha = {
155 .dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
156 .lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
157};
158
159static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
160{
161 return readl_relaxed(dd->io_base + offset);
162}
163
164static inline void atmel_sha_write(struct atmel_sha_dev *dd,
165 u32 offset, u32 value)
166{
167 writel_relaxed(value, dd->io_base + offset);
168}
169
/*
 * Complete the current hash request: clear all per-operation state
 * flags, gate the peripheral clock, invoke the crypto API completion
 * callback for asynchronous requests, and kick the queue tasklet so a
 * pending request (if any) is started next.  Returns @err unchanged so
 * callers can write "return atmel_sha_complete(dd, err);".
 */
static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err)
{
	struct ahash_request *req = dd->req;

	/* Drop the transient flags; SHA_FLAGS_INIT is deliberately kept. */
	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
		       SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);

	clk_disable(dd->iclk);

	/* Only notify when the request completed from tasklet/irq context. */
	if (dd->is_async && req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->queue_task);

	return err;
}
187
/*
 * Copy as much request data as fits from the scatterlist walk into the
 * context bounce buffer (ctx->buffer), advancing the walk state
 * (ctx->sg, ctx->offset, ctx->total) as it goes.  Always returns 0.
 */
static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, ctx->buflen - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * Check if count <= 0 because the buffer is full or
			 * because the sg length is 0. In the latest case,
			 * check if there is another sg in the list, a 0 length
			 * sg doesn't necessarily mean the end of the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
			ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		/* Current sg entry fully consumed: step to the next one. */
		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}
229
230/*
Nicolas Royerd4905b32013-02-20 17:10:26 +0100231 * The purpose of this padding is to ensure that the padded message is a
232 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
233 * The bit "1" is appended at the end of the message followed by
234 * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or
235 * 128 bits block (SHA384/SHA512) equals to the message length in bits
236 * is appended.
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200237 *
Nicolas Royerd4905b32013-02-20 17:10:26 +0100238 * For SHA1/SHA224/SHA256, padlen is calculated as followed:
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200239 * - if message length < 56 bytes then padlen = 56 - message length
240 * - else padlen = 64 + 56 - message length
Nicolas Royerd4905b32013-02-20 17:10:26 +0100241 *
242 * For SHA384/SHA512, padlen is calculated as followed:
243 * - if message length < 112 bytes then padlen = 112 - message length
244 * - else padlen = 128 + 112 - message length
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200245 */
/* Append the MD-strengthening padding described above to ctx->buffer. */
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
	unsigned int index, padlen;
	u64 bits[2];
	u64 size[2];

	/* 128-bit total message size = digcnt + bufcnt + length. */
	size[0] = ctx->digcnt[0];
	size[1] = ctx->digcnt[1];

	size[0] += ctx->bufcnt;
	if (size[0] < ctx->bufcnt)
		size[1]++;	/* carry into the high 64 bits */

	size[0] += length;
	if (size[0] < length)
		size[1]++;	/* carry into the high 64 bits */

	/* Message length in bits, big-endian, as a 128-bit quantity. */
	bits[1] = cpu_to_be64(size[0] << 3);
	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);

	if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
		/* 1024-bit block: pad to 112 mod 128, then 16 length bytes. */
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
	} else {
		/* 512-bit block: pad to 56 mod 64, then 8 length bytes. */
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
	}
}
284
Cyrille Pitchen8340c7f2017-01-26 17:07:46 +0100285static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx)
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200286{
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200287 struct atmel_sha_dev *dd = NULL;
288 struct atmel_sha_dev *tmp;
289
290 spin_lock_bh(&atmel_sha.lock);
291 if (!tctx->dd) {
292 list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
293 dd = tmp;
294 break;
295 }
296 tctx->dd = dd;
297 } else {
298 dd = tctx->dd;
299 }
300
301 spin_unlock_bh(&atmel_sha.lock);
302
Cyrille Pitchen8340c7f2017-01-26 17:07:46 +0100303 return dd;
304}
305
306static int atmel_sha_init(struct ahash_request *req)
307{
308 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
309 struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
310 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
311 struct atmel_sha_dev *dd = atmel_sha_find_dev(tctx);
312
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200313 ctx->dd = dd;
314
315 ctx->flags = 0;
316
317 dev_dbg(dd->dev, "init: digest size: %d\n",
318 crypto_ahash_digestsize(tfm));
319
Nicolas Royerd4905b32013-02-20 17:10:26 +0100320 switch (crypto_ahash_digestsize(tfm)) {
321 case SHA1_DIGEST_SIZE:
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200322 ctx->flags |= SHA_FLAGS_SHA1;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100323 ctx->block_size = SHA1_BLOCK_SIZE;
324 break;
325 case SHA224_DIGEST_SIZE:
326 ctx->flags |= SHA_FLAGS_SHA224;
327 ctx->block_size = SHA224_BLOCK_SIZE;
328 break;
329 case SHA256_DIGEST_SIZE:
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200330 ctx->flags |= SHA_FLAGS_SHA256;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100331 ctx->block_size = SHA256_BLOCK_SIZE;
332 break;
333 case SHA384_DIGEST_SIZE:
334 ctx->flags |= SHA_FLAGS_SHA384;
335 ctx->block_size = SHA384_BLOCK_SIZE;
336 break;
337 case SHA512_DIGEST_SIZE:
338 ctx->flags |= SHA_FLAGS_SHA512;
339 ctx->block_size = SHA512_BLOCK_SIZE;
340 break;
341 default:
342 return -EINVAL;
343 break;
344 }
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200345
346 ctx->bufcnt = 0;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100347 ctx->digcnt[0] = 0;
348 ctx->digcnt[1] = 0;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200349 ctx->buflen = SHA_BUFFER_LEN;
350
351 return 0;
352}
353
/*
 * Program the SHA Mode Register before starting an operation.
 * @dma: non-zero when data will be fed via PDC/DMA, 0 for CPU (PIO).
 *
 * Selects the transfer mode and hash algorithm and, when the hardware
 * has the UIHV feature, restores a previously saved intermediate digest
 * so that several crypto requests can be interleaved on one engine.
 */
static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 valmr = SHA_MR_MODE_AUTO;
	unsigned int i, hashsize = 0;

	if (likely(dma)) {
		/* Without a DMA engine, the PDC TXBUFE interrupt ends the op. */
		if (!dd->caps.has_dma)
			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
		valmr = SHA_MR_MODE_PDC;
		if (dd->caps.has_dualbuff)
			valmr |= SHA_MR_DUALBUFF;
	} else {
		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	}

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		valmr |= SHA_MR_ALGO_SHA1;
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
		valmr |= SHA_MR_ALGO_SHA224;
		/* SHA-224 carries the full 256-bit internal state. */
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA256:
		valmr |= SHA_MR_ALGO_SHA256;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
		valmr |= SHA_MR_ALGO_SHA384;
		/* SHA-384 carries the full 512-bit internal state. */
		hashsize = SHA512_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA512:
		valmr |= SHA_MR_ALGO_SHA512;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		break;
	}

	/* Setting CR_FIRST only for the first iteration */
	if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
	} else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
		const u32 *hash = (const u32 *)ctx->digest;

		/*
		 * Restore the hardware context: update the User Initialize
		 * Hash Value (UIHV) with the value saved when the latest
		 * 'update' operation completed on this very same crypto
		 * request.
		 */
		ctx->flags &= ~SHA_FLAGS_RESTORE;
		atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
		for (i = 0; i < hashsize / sizeof(u32); ++i)
			atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
		valmr |= SHA_MR_UIHV;
	}
	/*
	 * WARNING: If the UIHV feature is not available, the hardware CANNOT
	 * process concurrent requests: the internal registers used to store
	 * the hash/digest are still set to the partial digest output values
	 * computed during the latest round.
	 */

	atmel_sha_write(dd, SHA_MR, valmr);
}
428
429static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
430 size_t length, int final)
431{
432 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
433 int count, len32;
434 const u32 *buffer = (const u32 *)buf;
435
Nicolas Royerd4905b32013-02-20 17:10:26 +0100436 dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
437 ctx->digcnt[1], ctx->digcnt[0], length, final);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200438
439 atmel_sha_write_ctrl(dd, 0);
440
441 /* should be non-zero before next lines to disable clocks later */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100442 ctx->digcnt[0] += length;
443 if (ctx->digcnt[0] < length)
444 ctx->digcnt[1]++;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200445
446 if (final)
447 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
448
449 len32 = DIV_ROUND_UP(length, sizeof(u32));
450
451 dd->flags |= SHA_FLAGS_CPU;
452
453 for (count = 0; count < len32; count++)
454 atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
455
456 return -EINPROGRESS;
457}
458
459static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
460 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
461{
462 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
463 int len32;
464
Nicolas Royerd4905b32013-02-20 17:10:26 +0100465 dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
466 ctx->digcnt[1], ctx->digcnt[0], length1, final);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200467
468 len32 = DIV_ROUND_UP(length1, sizeof(u32));
469 atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
470 atmel_sha_write(dd, SHA_TPR, dma_addr1);
471 atmel_sha_write(dd, SHA_TCR, len32);
472
473 len32 = DIV_ROUND_UP(length2, sizeof(u32));
474 atmel_sha_write(dd, SHA_TNPR, dma_addr2);
475 atmel_sha_write(dd, SHA_TNCR, len32);
476
477 atmel_sha_write_ctrl(dd, 1);
478
479 /* should be non-zero before next lines to disable clocks later */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100480 ctx->digcnt[0] += length1;
481 if (ctx->digcnt[0] < length1)
482 ctx->digcnt[1]++;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200483
484 if (final)
485 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
486
487 dd->flags |= SHA_FLAGS_DMA_ACTIVE;
488
489 /* Start DMA transfer */
490 atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);
491
492 return -EINPROGRESS;
493}
494
/*
 * dmaengine completion callback for the input channel.  Marks the
 * request asynchronous (completion will be delivered via callback) and
 * arms the DATRDY interrupt to learn when the engine is done.
 */
static void atmel_sha_dma_callback(void *data)
{
	struct atmel_sha_dev *dd = data;

	dd->is_async = true;

	/* dma_lch_in - completed - wait DATRDY */
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}
504
505static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
506 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
507{
508 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
509 struct dma_async_tx_descriptor *in_desc;
510 struct scatterlist sg[2];
511
512 dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
513 ctx->digcnt[1], ctx->digcnt[0], length1, final);
514
Leilei Zhao3f1992c2015-04-07 17:45:07 +0800515 dd->dma_lch_in.dma_conf.src_maxburst = 16;
516 dd->dma_lch_in.dma_conf.dst_maxburst = 16;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100517
518 dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
519
520 if (length2) {
521 sg_init_table(sg, 2);
522 sg_dma_address(&sg[0]) = dma_addr1;
523 sg_dma_len(&sg[0]) = length1;
524 sg_dma_address(&sg[1]) = dma_addr2;
525 sg_dma_len(&sg[1]) = length2;
526 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
527 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
528 } else {
529 sg_init_table(sg, 1);
530 sg_dma_address(&sg[0]) = dma_addr1;
531 sg_dma_len(&sg[0]) = length1;
532 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
533 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
534 }
535 if (!in_desc)
Cyrille Pitchena29af932017-01-26 17:07:47 +0100536 atmel_sha_complete(dd, -EINVAL);
Nicolas Royerd4905b32013-02-20 17:10:26 +0100537
538 in_desc->callback = atmel_sha_dma_callback;
539 in_desc->callback_param = dd;
540
541 atmel_sha_write_ctrl(dd, 1);
542
543 /* should be non-zero before next lines to disable clocks later */
544 ctx->digcnt[0] += length1;
545 if (ctx->digcnt[0] < length1)
546 ctx->digcnt[1]++;
547
548 if (final)
549 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
550
551 dd->flags |= SHA_FLAGS_DMA_ACTIVE;
552
553 /* Start DMA transfer */
554 dmaengine_submit(in_desc);
555 dma_async_issue_pending(dd->dma_lch_in.chan);
556
557 return -EINPROGRESS;
558}
559
560static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
561 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
562{
563 if (dd->caps.has_dma)
564 return atmel_sha_xmit_dma(dd, dma_addr1, length1,
565 dma_addr2, length2, final);
566 else
567 return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
568 dma_addr2, length2, final);
569}
570
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200571static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
572{
573 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
574 int bufcnt;
575
576 atmel_sha_append_sg(ctx);
577 atmel_sha_fill_padding(ctx, 0);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200578 bufcnt = ctx->bufcnt;
579 ctx->bufcnt = 0;
580
581 return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
582}
583
584static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
585 struct atmel_sha_reqctx *ctx,
586 size_t length, int final)
587{
588 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
Nicolas Royerd4905b32013-02-20 17:10:26 +0100589 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200590 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
591 dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
Nicolas Royerd4905b32013-02-20 17:10:26 +0100592 ctx->block_size);
Cyrille Pitchena29af932017-01-26 17:07:47 +0100593 atmel_sha_complete(dd, -EINVAL);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200594 }
595
596 ctx->flags &= ~SHA_FLAGS_SG;
597
598 /* next call does not fail... so no unmap in the case of error */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100599 return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200600}
601
602static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
603{
604 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
605 unsigned int final;
606 size_t count;
607
608 atmel_sha_append_sg(ctx);
609
610 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
611
Nicolas Royerd4905b32013-02-20 17:10:26 +0100612 dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
613 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200614
615 if (final)
616 atmel_sha_fill_padding(ctx, 0);
617
Ludovic Desroches00992862015-04-07 17:45:04 +0800618 if (final || (ctx->bufcnt == ctx->buflen)) {
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200619 count = ctx->bufcnt;
620 ctx->bufcnt = 0;
621 return atmel_sha_xmit_dma_map(dd, ctx, count, final);
622 }
623
624 return 0;
625}
626
627static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
628{
629 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
630 unsigned int length, final, tail;
631 struct scatterlist *sg;
632 unsigned int count;
633
634 if (!ctx->total)
635 return 0;
636
637 if (ctx->bufcnt || ctx->offset)
638 return atmel_sha_update_dma_slow(dd);
639
Nicolas Royerd4905b32013-02-20 17:10:26 +0100640 dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
641 ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200642
643 sg = ctx->sg;
644
645 if (!IS_ALIGNED(sg->offset, sizeof(u32)))
646 return atmel_sha_update_dma_slow(dd);
647
Nicolas Royerd4905b32013-02-20 17:10:26 +0100648 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
649 /* size is not ctx->block_size aligned */
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200650 return atmel_sha_update_dma_slow(dd);
651
652 length = min(ctx->total, sg->length);
653
654 if (sg_is_last(sg)) {
655 if (!(ctx->flags & SHA_FLAGS_FINUP)) {
Nicolas Royerd4905b32013-02-20 17:10:26 +0100656 /* not last sg must be ctx->block_size aligned */
657 tail = length & (ctx->block_size - 1);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200658 length -= tail;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200659 }
660 }
661
662 ctx->total -= length;
663 ctx->offset = length; /* offset where to start slow */
664
665 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
666
667 /* Add padding */
668 if (final) {
Nicolas Royerd4905b32013-02-20 17:10:26 +0100669 tail = length & (ctx->block_size - 1);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200670 length -= tail;
671 ctx->total += tail;
672 ctx->offset = length; /* offset where to start slow */
673
674 sg = ctx->sg;
675 atmel_sha_append_sg(ctx);
676
677 atmel_sha_fill_padding(ctx, length);
678
679 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
Nicolas Royerd4905b32013-02-20 17:10:26 +0100680 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200681 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
682 dev_err(dd->dev, "dma %u bytes error\n",
Nicolas Royerd4905b32013-02-20 17:10:26 +0100683 ctx->buflen + ctx->block_size);
Cyrille Pitchena29af932017-01-26 17:07:47 +0100684 atmel_sha_complete(dd, -EINVAL);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200685 }
686
687 if (length == 0) {
688 ctx->flags &= ~SHA_FLAGS_SG;
689 count = ctx->bufcnt;
690 ctx->bufcnt = 0;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100691 return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200692 0, final);
693 } else {
694 ctx->sg = sg;
695 if (!dma_map_sg(dd->dev, ctx->sg, 1,
696 DMA_TO_DEVICE)) {
697 dev_err(dd->dev, "dma_map_sg error\n");
Cyrille Pitchena29af932017-01-26 17:07:47 +0100698 atmel_sha_complete(dd, -EINVAL);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200699 }
700
701 ctx->flags |= SHA_FLAGS_SG;
702
703 count = ctx->bufcnt;
704 ctx->bufcnt = 0;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100705 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200706 length, ctx->dma_addr, count, final);
707 }
708 }
709
710 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
711 dev_err(dd->dev, "dma_map_sg error\n");
Cyrille Pitchena29af932017-01-26 17:07:47 +0100712 atmel_sha_complete(dd, -EINVAL);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200713 }
714
715 ctx->flags |= SHA_FLAGS_SG;
716
717 /* next call does not fail... so no unmap in the case of error */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100718 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200719 0, final);
720}
721
/*
 * Undo the DMA mappings created by atmel_sha_update_dma_start() after
 * the transfer completed, and advance the scatterlist walk state.
 * Always returns 0.
 */
static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			/* sg entry fully consumed: step to the next one. */
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD) {
			/* The padding went through the bounce buffer too. */
			dma_unmap_single(dd->dev, ctx->dma_addr,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
						ctx->block_size, DMA_TO_DEVICE);
	}

	return 0;
}
744
745static int atmel_sha_update_req(struct atmel_sha_dev *dd)
746{
747 struct ahash_request *req = dd->req;
748 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
749 int err;
750
Nicolas Royerd4905b32013-02-20 17:10:26 +0100751 dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
752 ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200753
754 if (ctx->flags & SHA_FLAGS_CPU)
755 err = atmel_sha_update_cpu(dd);
756 else
757 err = atmel_sha_update_dma_start(dd);
758
759 /* wait for dma completion before can take more data */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100760 dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0%llx\n",
761 err, ctx->digcnt[1], ctx->digcnt[0]);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200762
763 return err;
764}
765
766static int atmel_sha_final_req(struct atmel_sha_dev *dd)
767{
768 struct ahash_request *req = dd->req;
769 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
770 int err = 0;
771 int count;
772
773 if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
774 atmel_sha_fill_padding(ctx, 0);
775 count = ctx->bufcnt;
776 ctx->bufcnt = 0;
777 err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
778 }
779 /* faster to handle last block with cpu */
780 else {
781 atmel_sha_fill_padding(ctx, 0);
782 count = ctx->bufcnt;
783 ctx->bufcnt = 0;
784 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
785 }
786
787 dev_dbg(dd->dev, "final_req: err: %d\n", err);
788
789 return err;
790}
791
/*
 * Save the current (possibly intermediate) digest from the hardware
 * registers into ctx->digest and mark the context restorable, so a
 * later atmel_sha_write_ctrl() can reload it through UIHV.
 */
static void atmel_sha_copy_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i, hashsize;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
	case SHA_FLAGS_SHA256:
		/* SHA-224 carries the full 256-bit internal state. */
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		/* SHA-384 carries the full 512-bit internal state. */
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		/* Should not happen... */
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); ++i)
		hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	ctx->flags |= SHA_FLAGS_RESTORE;
}
822
823static void atmel_sha_copy_ready_hash(struct ahash_request *req)
824{
825 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
826
827 if (!req->result)
828 return;
829
Nicolas Royerd4905b32013-02-20 17:10:26 +0100830 if (ctx->flags & SHA_FLAGS_SHA1)
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200831 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
Nicolas Royerd4905b32013-02-20 17:10:26 +0100832 else if (ctx->flags & SHA_FLAGS_SHA224)
833 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
834 else if (ctx->flags & SHA_FLAGS_SHA256)
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200835 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
Nicolas Royerd4905b32013-02-20 17:10:26 +0100836 else if (ctx->flags & SHA_FLAGS_SHA384)
837 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
838 else
839 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200840}
841
842static int atmel_sha_finish(struct ahash_request *req)
843{
844 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
845 struct atmel_sha_dev *dd = ctx->dd;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200846
Nicolas Royerd4905b32013-02-20 17:10:26 +0100847 if (ctx->digcnt[0] || ctx->digcnt[1])
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200848 atmel_sha_copy_ready_hash(req);
849
Nicolas Royerd4905b32013-02-20 17:10:26 +0100850 dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
851 ctx->digcnt[0], ctx->bufcnt);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200852
Rahul Pathak871b88a2015-12-14 08:44:19 +0000853 return 0;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200854}
855
/*
 * Common completion path.  On success, save the hardware digest into
 * the context and, if this was the final operation, copy it out to
 * req->result.  Errors are latched in the context flags.  Always ends
 * with atmel_sha_complete(), which releases the device and notifies
 * asynchronous callers.
 */
static void atmel_sha_finish_req(struct ahash_request *req, int err)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (!err) {
		atmel_sha_copy_hash(req);
		if (SHA_FLAGS_FINAL & dd->flags)
			err = atmel_sha_finish(req);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}

	/* atomic operation is not needed here */
	(void)atmel_sha_complete(dd, err);
}
872
/*
 * Enable the peripheral clock and, on first use only, soft-reset the SHA
 * block so it starts from a known state.  Returns 0 on success or the
 * clk_enable() error code.
 */
static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
	int err;

	err = clk_enable(dd->iclk);
	if (err)
		return err;

	if (!(SHA_FLAGS_INIT & dd->flags)) {
		/* Software reset of the IP; done once per device lifetime. */
		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
		dd->flags |= SHA_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}
889
Nicolas Royerd4905b32013-02-20 17:10:26 +0100890static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
891{
892 return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
893}
894
/*
 * Probe-time helper: bring the IP up, cache its hardware version (used by
 * atmel_sha_get_cap() for capability detection), log it, then gate the
 * clock again until the first request arrives.
 */
static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
	atmel_sha_hw_init(dd);

	dd->hw_version = atmel_sha_get_version(dd);

	dev_info(dd->dev,
			"version: 0x%x\n", dd->hw_version);

	clk_disable(dd->iclk);
}
906
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200907static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
908 struct ahash_request *req)
909{
910 struct crypto_async_request *async_req, *backlog;
Cyrille Pitchena29af932017-01-26 17:07:47 +0100911 struct atmel_sha_ctx *ctx;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200912 unsigned long flags;
Cyrille Pitchena29af932017-01-26 17:07:47 +0100913 bool start_async;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200914 int err = 0, ret = 0;
915
916 spin_lock_irqsave(&dd->lock, flags);
917 if (req)
918 ret = ahash_enqueue_request(&dd->queue, req);
919
920 if (SHA_FLAGS_BUSY & dd->flags) {
921 spin_unlock_irqrestore(&dd->lock, flags);
922 return ret;
923 }
924
925 backlog = crypto_get_backlog(&dd->queue);
926 async_req = crypto_dequeue_request(&dd->queue);
927 if (async_req)
928 dd->flags |= SHA_FLAGS_BUSY;
929
930 spin_unlock_irqrestore(&dd->lock, flags);
931
932 if (!async_req)
933 return ret;
934
935 if (backlog)
936 backlog->complete(backlog, -EINPROGRESS);
937
Cyrille Pitchena29af932017-01-26 17:07:47 +0100938 ctx = crypto_tfm_ctx(async_req->tfm);
939
940 dd->req = ahash_request_cast(async_req);
941 start_async = (dd->req != req);
942 dd->is_async = start_async;
943
944 /* WARNING: ctx->start() MAY change dd->is_async. */
945 err = ctx->start(dd);
946 return (start_async) ? ret : err;
947}
948
/*
 * ctx->start() handler for plain SHA transforms: initialize the hardware
 * and run the pending update/final operation.  When the operation does
 * not go asynchronous (err != -EINPROGRESS) the request must be finished
 * here, since done_task will never be scheduled for it.
 */
static int atmel_sha_start(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = atmel_sha_hw_init(dd);

	if (err)
		goto err1;

	if (ctx->op == SHA_OP_UPDATE) {
		err = atmel_sha_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* no final() after finup() */
			err = atmel_sha_final_req(dd);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = atmel_sha_final_req(dd);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return err;
}
981
982static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
983{
984 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
985 struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
986 struct atmel_sha_dev *dd = tctx->dd;
987
988 ctx->op = op;
989
990 return atmel_sha_handle_queue(dd, req);
991}
992
/*
 * ahash .update entry point.  Short trailing data is accumulated in the
 * request-context buffer; a hardware operation is queued only when the
 * buffer would overflow, or on finup (where transfers below the DMA
 * threshold take the faster CPU path).
 */
static int atmel_sha_update(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & SHA_FLAGS_FINUP) {
		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
			/* faster to use CPU for short transfers */
			ctx->flags |= SHA_FLAGS_CPU;
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		/* Everything fits in the context buffer: defer processing. */
		atmel_sha_append_sg(ctx);
		return 0;
	}
	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
}
1014
/*
 * ahash .final entry point.  Does nothing for an errored request,
 * returns the already-available digest when padding was completed by a
 * previous operation, and otherwise queues a final operation.
 */
static int atmel_sha_final(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_ERROR)
		return 0; /* uncompleted hash is not needed */

	if (ctx->flags & SHA_FLAGS_PAD)
		/* copy ready hash (+ finalize hmac) */
		return atmel_sha_finish(req);

	return atmel_sha_enqueue(req, SHA_OP_FINAL);
}
1030
/*
 * ahash .finup entry point: update with the remaining data, then
 * finalize.  The first error (if any) takes precedence in the return
 * value.
 */
static int atmel_sha_finup(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = atmel_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = atmel_sha_final(req);

	return err1 ?: err2;
}
1050
/* One-shot hash: initialize the request, then run update + final. */
static int atmel_sha_digest(struct ahash_request *req)
{
	int err;

	err = atmel_sha_init(req);
	if (err)
		return err;

	return atmel_sha_finup(req);
}
1055
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001056
1057static int atmel_sha_export(struct ahash_request *req, void *out)
1058{
1059 const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001060
Cyrille Pitchen9c4274d2016-02-08 16:26:48 +01001061 memcpy(out, ctx, sizeof(*ctx));
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001062 return 0;
1063}
1064
1065static int atmel_sha_import(struct ahash_request *req, const void *in)
1066{
1067 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001068
Cyrille Pitchen9c4274d2016-02-08 16:26:48 +01001069 memcpy(ctx, in, sizeof(*ctx));
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001070 return 0;
1071}
1072
Svenning Sørensenbe95f0f2014-12-05 01:18:57 +01001073static int atmel_sha_cra_init(struct crypto_tfm *tfm)
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001074{
Cyrille Pitchena29af932017-01-26 17:07:47 +01001075 struct atmel_sha_ctx *ctx = crypto_tfm_ctx(tfm);
1076
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001077 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
Cyrille Pitchen9c4274d2016-02-08 16:26:48 +01001078 sizeof(struct atmel_sha_reqctx));
Cyrille Pitchena29af932017-01-26 17:07:47 +01001079 ctx->start = atmel_sha_start;
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001080
1081 return 0;
1082}
1083
Nicolas Royerd4905b32013-02-20 17:10:26 +01001084static struct ahash_alg sha_1_256_algs[] = {
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001085{
1086 .init = atmel_sha_init,
1087 .update = atmel_sha_update,
1088 .final = atmel_sha_final,
1089 .finup = atmel_sha_finup,
1090 .digest = atmel_sha_digest,
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001091 .export = atmel_sha_export,
1092 .import = atmel_sha_import,
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001093 .halg = {
1094 .digestsize = SHA1_DIGEST_SIZE,
Cyrille Pitchen9c4274d2016-02-08 16:26:48 +01001095 .statesize = sizeof(struct atmel_sha_reqctx),
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001096 .base = {
1097 .cra_name = "sha1",
1098 .cra_driver_name = "atmel-sha1",
1099 .cra_priority = 100,
Svenning Sørensenbe95f0f2014-12-05 01:18:57 +01001100 .cra_flags = CRYPTO_ALG_ASYNC,
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001101 .cra_blocksize = SHA1_BLOCK_SIZE,
1102 .cra_ctxsize = sizeof(struct atmel_sha_ctx),
1103 .cra_alignmask = 0,
1104 .cra_module = THIS_MODULE,
1105 .cra_init = atmel_sha_cra_init,
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001106 }
1107 }
1108},
1109{
1110 .init = atmel_sha_init,
1111 .update = atmel_sha_update,
1112 .final = atmel_sha_final,
1113 .finup = atmel_sha_finup,
1114 .digest = atmel_sha_digest,
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001115 .export = atmel_sha_export,
1116 .import = atmel_sha_import,
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001117 .halg = {
1118 .digestsize = SHA256_DIGEST_SIZE,
Cyrille Pitchen9c4274d2016-02-08 16:26:48 +01001119 .statesize = sizeof(struct atmel_sha_reqctx),
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001120 .base = {
1121 .cra_name = "sha256",
1122 .cra_driver_name = "atmel-sha256",
1123 .cra_priority = 100,
Svenning Sørensenbe95f0f2014-12-05 01:18:57 +01001124 .cra_flags = CRYPTO_ALG_ASYNC,
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001125 .cra_blocksize = SHA256_BLOCK_SIZE,
1126 .cra_ctxsize = sizeof(struct atmel_sha_ctx),
1127 .cra_alignmask = 0,
1128 .cra_module = THIS_MODULE,
1129 .cra_init = atmel_sha_cra_init,
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001130 }
1131 }
1132},
1133};
1134
Nicolas Royerd4905b32013-02-20 17:10:26 +01001135static struct ahash_alg sha_224_alg = {
1136 .init = atmel_sha_init,
1137 .update = atmel_sha_update,
1138 .final = atmel_sha_final,
1139 .finup = atmel_sha_finup,
1140 .digest = atmel_sha_digest,
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001141 .export = atmel_sha_export,
1142 .import = atmel_sha_import,
Nicolas Royerd4905b32013-02-20 17:10:26 +01001143 .halg = {
1144 .digestsize = SHA224_DIGEST_SIZE,
Cyrille Pitchen9c4274d2016-02-08 16:26:48 +01001145 .statesize = sizeof(struct atmel_sha_reqctx),
Nicolas Royerd4905b32013-02-20 17:10:26 +01001146 .base = {
1147 .cra_name = "sha224",
1148 .cra_driver_name = "atmel-sha224",
1149 .cra_priority = 100,
Svenning Sørensenbe95f0f2014-12-05 01:18:57 +01001150 .cra_flags = CRYPTO_ALG_ASYNC,
Nicolas Royerd4905b32013-02-20 17:10:26 +01001151 .cra_blocksize = SHA224_BLOCK_SIZE,
1152 .cra_ctxsize = sizeof(struct atmel_sha_ctx),
1153 .cra_alignmask = 0,
1154 .cra_module = THIS_MODULE,
1155 .cra_init = atmel_sha_cra_init,
Nicolas Royerd4905b32013-02-20 17:10:26 +01001156 }
1157 }
1158};
1159
/* SHA-384/SHA-512: registered only when dd->caps.has_sha_384_512 is set. */
static struct ahash_alg sha_384_512_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha384",
			.cra_driver_name	= "atmel-sha384",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			/* 64-bit input words: require 4-byte alignment. */
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha512",
			.cra_driver_name	= "atmel-sha512",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};
1210
Cyrille Pitchenf56809c2016-01-15 15:49:32 +01001211static void atmel_sha_queue_task(unsigned long data)
1212{
1213 struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
1214
1215 atmel_sha_handle_queue(dd, NULL);
1216}
1217
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001218static void atmel_sha_done_task(unsigned long data)
1219{
1220 struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
1221 int err = 0;
1222
Cyrille Pitchena29af932017-01-26 17:07:47 +01001223 dd->is_async = true;
1224
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001225 if (SHA_FLAGS_CPU & dd->flags) {
1226 if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
1227 dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
1228 goto finish;
1229 }
1230 } else if (SHA_FLAGS_DMA_READY & dd->flags) {
1231 if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
1232 dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
1233 atmel_sha_update_dma_stop(dd);
1234 if (dd->err) {
1235 err = dd->err;
1236 goto finish;
1237 }
1238 }
1239 if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
1240 /* hash or semi-hash ready */
1241 dd->flags &= ~(SHA_FLAGS_DMA_READY |
1242 SHA_FLAGS_OUTPUT_READY);
1243 err = atmel_sha_update_dma_start(dd);
1244 if (err != -EINPROGRESS)
1245 goto finish;
1246 }
1247 }
1248 return;
1249
1250finish:
1251 /* finish curent request */
1252 atmel_sha_finish_req(dd->req, err);
1253}
1254
/*
 * Interrupt handler: mask the interrupts that fired and defer all real
 * work to done_task.  A spurious interrupt (no active request) is logged
 * but still acknowledged as handled.
 */
static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
	struct atmel_sha_dev *sha_dd = dev_id;
	u32 reg;

	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		/* Disable the reported interrupt sources before deferring. */
		atmel_sha_write(sha_dd, SHA_IDR, reg);
		if (SHA_FLAGS_BUSY & sha_dd->flags) {
			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
			if (!(SHA_FLAGS_CPU & sha_dd->flags))
				sha_dd->flags |= SHA_FLAGS_DMA_READY;
			tasklet_schedule(&sha_dd->done_task);
		} else {
			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1276
1277static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
1278{
1279 int i;
1280
Nicolas Royerd4905b32013-02-20 17:10:26 +01001281 for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
1282 crypto_unregister_ahash(&sha_1_256_algs[i]);
1283
1284 if (dd->caps.has_sha224)
1285 crypto_unregister_ahash(&sha_224_alg);
1286
1287 if (dd->caps.has_sha_384_512) {
1288 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
1289 crypto_unregister_ahash(&sha_384_512_algs[i]);
1290 }
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001291}
1292
/*
 * Register the ahash algorithms supported by this IP revision.  On
 * failure, every algorithm registered so far is unwound (the error
 * labels deliberately fall through from newest to oldest registration).
 */
static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
		err = crypto_register_ahash(&sha_1_256_algs[i]);
		if (err)
			goto err_sha_1_256_algs;
	}

	if (dd->caps.has_sha224) {
		err = crypto_register_ahash(&sha_224_alg);
		if (err)
			goto err_sha_224_algs;
	}

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
			err = crypto_register_ahash(&sha_384_512_algs[i]);
			if (err)
				goto err_sha_384_512_algs;
		}
	}

	return 0;

err_sha_384_512_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_384_512_algs[j]);
	crypto_unregister_ahash(&sha_224_alg);
err_sha_224_algs:
	/* Make the fall-through below unwind all of sha_1_256_algs. */
	i = ARRAY_SIZE(sha_1_256_algs);
err_sha_1_256_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_1_256_algs[j]);

	return err;
}
1331
Nicolas Royerd4905b32013-02-20 17:10:26 +01001332static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
1333{
1334 struct at_dma_slave *sl = slave;
1335
1336 if (sl && sl->dma_dev == chan->device->dev) {
1337 chan->private = sl;
1338 return true;
1339 } else {
1340 return false;
1341 }
1342}
1343
/*
 * Request the DMA channel used to feed data into the SHA input register
 * and pre-configure its slave parameters.  Returns 0 on success or
 * -ENOMEM when no channel is available.
 */
static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
				struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask_in;

	/* Try to grab DMA channel */
	dma_cap_zero(mask_in);
	dma_cap_set(DMA_SLAVE, mask_in);

	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
			atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan) {
		dev_warn(dd->dev, "no DMA channel available\n");
		return err;
	}

	/* Memory-to-device, 32-bit word transfers into SHA_REG_DIN(0). */
	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		SHA_REG_DIN(0);
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	return 0;
}
1374
/* Release the DMA channel acquired by atmel_sha_dma_init(). */
static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
}
1379
1380static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
1381{
1382
1383 dd->caps.has_dma = 0;
1384 dd->caps.has_dualbuff = 0;
1385 dd->caps.has_sha224 = 0;
1386 dd->caps.has_sha_384_512 = 0;
Cyrille Pitchen7cee3502016-01-15 15:49:34 +01001387 dd->caps.has_uihv = 0;
Nicolas Royerd4905b32013-02-20 17:10:26 +01001388
1389 /* keep only major version number */
1390 switch (dd->hw_version & 0xff0) {
Cyrille Pitchen507c5cc2016-01-15 15:49:33 +01001391 case 0x510:
1392 dd->caps.has_dma = 1;
1393 dd->caps.has_dualbuff = 1;
1394 dd->caps.has_sha224 = 1;
1395 dd->caps.has_sha_384_512 = 1;
Cyrille Pitchen7cee3502016-01-15 15:49:34 +01001396 dd->caps.has_uihv = 1;
Cyrille Pitchen507c5cc2016-01-15 15:49:33 +01001397 break;
Leilei Zhao141824d2015-04-07 17:45:03 +08001398 case 0x420:
1399 dd->caps.has_dma = 1;
1400 dd->caps.has_dualbuff = 1;
1401 dd->caps.has_sha224 = 1;
1402 dd->caps.has_sha_384_512 = 1;
Cyrille Pitchen7cee3502016-01-15 15:49:34 +01001403 dd->caps.has_uihv = 1;
Leilei Zhao141824d2015-04-07 17:45:03 +08001404 break;
Nicolas Royerd4905b32013-02-20 17:10:26 +01001405 case 0x410:
1406 dd->caps.has_dma = 1;
1407 dd->caps.has_dualbuff = 1;
1408 dd->caps.has_sha224 = 1;
1409 dd->caps.has_sha_384_512 = 1;
1410 break;
1411 case 0x400:
1412 dd->caps.has_dma = 1;
1413 dd->caps.has_dualbuff = 1;
1414 dd->caps.has_sha224 = 1;
1415 break;
1416 case 0x320:
1417 break;
1418 default:
1419 dev_warn(dd->dev,
1420 "Unmanaged sha version, set minimum capabilities\n");
1421 break;
1422 }
1423}
1424
Nicolas Ferreabfe7ae2013-10-15 15:36:34 +02001425#if defined(CONFIG_OF)
1426static const struct of_device_id atmel_sha_dt_ids[] = {
1427 { .compatible = "atmel,at91sam9g46-sha" },
1428 { /* sentinel */ }
1429};
1430
1431MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
1432
1433static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
1434{
1435 struct device_node *np = pdev->dev.of_node;
1436 struct crypto_platform_data *pdata;
1437
1438 if (!np) {
1439 dev_err(&pdev->dev, "device node not found\n");
1440 return ERR_PTR(-EINVAL);
1441 }
1442
1443 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1444 if (!pdata) {
1445 dev_err(&pdev->dev, "could not allocate memory for pdata\n");
1446 return ERR_PTR(-ENOMEM);
1447 }
1448
1449 pdata->dma_slave = devm_kzalloc(&pdev->dev,
1450 sizeof(*(pdata->dma_slave)),
1451 GFP_KERNEL);
1452 if (!pdata->dma_slave) {
1453 dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
Nicolas Ferreabfe7ae2013-10-15 15:36:34 +02001454 return ERR_PTR(-ENOMEM);
1455 }
1456
1457 return pdata;
1458}
1459#else /* CONFIG_OF */
1460static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
1461{
1462 return ERR_PTR(-EINVAL);
1463}
1464#endif
1465
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08001466static int atmel_sha_probe(struct platform_device *pdev)
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001467{
1468 struct atmel_sha_dev *sha_dd;
Nicolas Royerd4905b32013-02-20 17:10:26 +01001469 struct crypto_platform_data *pdata;
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001470 struct device *dev = &pdev->dev;
1471 struct resource *sha_res;
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001472 int err;
1473
LABBE Corentinb0e8b342015-10-12 19:47:03 +02001474 sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001475 if (sha_dd == NULL) {
1476 dev_err(dev, "unable to alloc data struct.\n");
1477 err = -ENOMEM;
1478 goto sha_dd_err;
1479 }
1480
1481 sha_dd->dev = dev;
1482
1483 platform_set_drvdata(pdev, sha_dd);
1484
1485 INIT_LIST_HEAD(&sha_dd->list);
Leilei Zhao62728e82015-04-07 17:45:06 +08001486 spin_lock_init(&sha_dd->lock);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001487
1488 tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
1489 (unsigned long)sha_dd);
Cyrille Pitchenf56809c2016-01-15 15:49:32 +01001490 tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
1491 (unsigned long)sha_dd);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001492
1493 crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
1494
1495 sha_dd->irq = -1;
1496
1497 /* Get the base address */
1498 sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1499 if (!sha_res) {
1500 dev_err(dev, "no MEM resource info\n");
1501 err = -ENODEV;
1502 goto res_err;
1503 }
1504 sha_dd->phys_base = sha_res->start;
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001505
1506 /* Get the IRQ */
1507 sha_dd->irq = platform_get_irq(pdev, 0);
1508 if (sha_dd->irq < 0) {
1509 dev_err(dev, "no IRQ resource info\n");
1510 err = sha_dd->irq;
1511 goto res_err;
1512 }
1513
LABBE Corentinb0e8b342015-10-12 19:47:03 +02001514 err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
1515 IRQF_SHARED, "atmel-sha", sha_dd);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001516 if (err) {
1517 dev_err(dev, "unable to request sha irq.\n");
1518 goto res_err;
1519 }
1520
1521 /* Initializing the clock */
LABBE Corentinb0e8b342015-10-12 19:47:03 +02001522 sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001523 if (IS_ERR(sha_dd->iclk)) {
Colin Ian Kingbe208352015-02-28 20:40:10 +00001524 dev_err(dev, "clock initialization failed.\n");
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001525 err = PTR_ERR(sha_dd->iclk);
LABBE Corentinb0e8b342015-10-12 19:47:03 +02001526 goto res_err;
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001527 }
1528
LABBE Corentinb0e8b342015-10-12 19:47:03 +02001529 sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
Vladimir Zapolskiy9b52d552016-03-06 03:21:52 +02001530 if (IS_ERR(sha_dd->io_base)) {
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001531 dev_err(dev, "can't ioremap\n");
Vladimir Zapolskiy9b52d552016-03-06 03:21:52 +02001532 err = PTR_ERR(sha_dd->io_base);
LABBE Corentinb0e8b342015-10-12 19:47:03 +02001533 goto res_err;
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001534 }
1535
Cyrille Pitchenc0330422016-02-05 13:45:13 +01001536 err = clk_prepare(sha_dd->iclk);
1537 if (err)
1538 goto res_err;
1539
Nicolas Royerd4905b32013-02-20 17:10:26 +01001540 atmel_sha_hw_version_init(sha_dd);
1541
1542 atmel_sha_get_cap(sha_dd);
1543
1544 if (sha_dd->caps.has_dma) {
1545 pdata = pdev->dev.platform_data;
1546 if (!pdata) {
Nicolas Ferreabfe7ae2013-10-15 15:36:34 +02001547 pdata = atmel_sha_of_init(pdev);
1548 if (IS_ERR(pdata)) {
1549 dev_err(&pdev->dev, "platform data not available\n");
1550 err = PTR_ERR(pdata);
Cyrille Pitchenc0330422016-02-05 13:45:13 +01001551 goto iclk_unprepare;
Nicolas Ferreabfe7ae2013-10-15 15:36:34 +02001552 }
1553 }
1554 if (!pdata->dma_slave) {
Nicolas Royerd4905b32013-02-20 17:10:26 +01001555 err = -ENXIO;
Cyrille Pitchenc0330422016-02-05 13:45:13 +01001556 goto iclk_unprepare;
Nicolas Royerd4905b32013-02-20 17:10:26 +01001557 }
1558 err = atmel_sha_dma_init(sha_dd, pdata);
1559 if (err)
1560 goto err_sha_dma;
Nicolas Ferreabfe7ae2013-10-15 15:36:34 +02001561
1562 dev_info(dev, "using %s for DMA transfers\n",
1563 dma_chan_name(sha_dd->dma_lch_in.chan));
Nicolas Royerd4905b32013-02-20 17:10:26 +01001564 }
1565
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001566 spin_lock(&atmel_sha.lock);
1567 list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
1568 spin_unlock(&atmel_sha.lock);
1569
1570 err = atmel_sha_register_algs(sha_dd);
1571 if (err)
1572 goto err_algs;
1573
Nicolas Ferre1ca5b7d2013-10-15 16:37:44 +02001574 dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
1575 sha_dd->caps.has_sha224 ? "/SHA224" : "",
1576 sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001577
1578 return 0;
1579
1580err_algs:
1581 spin_lock(&atmel_sha.lock);
1582 list_del(&sha_dd->list);
1583 spin_unlock(&atmel_sha.lock);
Nicolas Royerd4905b32013-02-20 17:10:26 +01001584 if (sha_dd->caps.has_dma)
1585 atmel_sha_dma_cleanup(sha_dd);
1586err_sha_dma:
Cyrille Pitchenc0330422016-02-05 13:45:13 +01001587iclk_unprepare:
1588 clk_unprepare(sha_dd->iclk);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001589res_err:
Cyrille Pitchenf56809c2016-01-15 15:49:32 +01001590 tasklet_kill(&sha_dd->queue_task);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001591 tasklet_kill(&sha_dd->done_task);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001592sha_dd_err:
1593 dev_err(dev, "initialization failed.\n");
1594
1595 return err;
1596}
1597
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08001598static int atmel_sha_remove(struct platform_device *pdev)
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001599{
1600 static struct atmel_sha_dev *sha_dd;
1601
1602 sha_dd = platform_get_drvdata(pdev);
1603 if (!sha_dd)
1604 return -ENODEV;
1605 spin_lock(&atmel_sha.lock);
1606 list_del(&sha_dd->list);
1607 spin_unlock(&atmel_sha.lock);
1608
1609 atmel_sha_unregister_algs(sha_dd);
1610
Cyrille Pitchenf56809c2016-01-15 15:49:32 +01001611 tasklet_kill(&sha_dd->queue_task);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001612 tasklet_kill(&sha_dd->done_task);
1613
Nicolas Royerd4905b32013-02-20 17:10:26 +01001614 if (sha_dd->caps.has_dma)
1615 atmel_sha_dma_cleanup(sha_dd);
1616
Cyrille Pitchenc0330422016-02-05 13:45:13 +01001617 clk_unprepare(sha_dd->iclk);
1618
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001619 return 0;
1620}
1621
/* Platform driver glue; DT matching via atmel_sha_dt_ids under CONFIG_OF. */
static struct platform_driver atmel_sha_driver = {
	.probe		= atmel_sha_probe,
	.remove		= atmel_sha_remove,
	.driver		= {
		.name	= "atmel_sha",
		.of_match_table	= of_match_ptr(atmel_sha_dt_ids),
	},
};
1630
1631module_platform_driver(atmel_sha_driver);
1632
Nicolas Royerd4905b32013-02-20 17:10:26 +01001633MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001634MODULE_LICENSE("GPL v2");
1635MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");