// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/engine.h>

#define MD5_DIGEST_SIZE 16

#define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04))
#define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04))
#define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs)

#define SHA_REG_ODIGEST(dd, x) ((dd)->pdata->odigest_ofs + (x * 0x04))

#define SHA_REG_CTRL 0x18
#define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH (1 << 4)
#define SHA_REG_CTRL_ALGO_CONST (1 << 3)
#define SHA_REG_CTRL_ALGO (1 << 2)
#define SHA_REG_CTRL_INPUT_READY (1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY (1 << 0)

#define SHA_REG_REV(dd) ((dd)->pdata->rev_ofs)

#define SHA_REG_MASK(dd) ((dd)->pdata->mask_ofs)
#define SHA_REG_MASK_DMA_EN (1 << 3)
#define SHA_REG_MASK_IT_EN (1 << 2)
#define SHA_REG_MASK_SOFTRESET (1 << 1)
#define SHA_REG_AUTOIDLE (1 << 0)

#define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs)
#define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)

#define SHA_REG_MODE(dd) ((dd)->pdata->mode_ofs)
#define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7)
#define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5)
#define SHA_REG_MODE_CLOSE_HASH (1 << 4)
#define SHA_REG_MODE_ALGO_CONSTANT (1 << 3)

#define SHA_REG_MODE_ALGO_MASK (7 << 0)
#define SHA_REG_MODE_ALGO_MD5_128 (0 << 1)
#define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1)
#define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1)
#define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1)
#define SHA_REG_MODE_ALGO_SHA2_384 (1 << 0)
#define SHA_REG_MODE_ALGO_SHA2_512 (3 << 0)

#define SHA_REG_LENGTH(dd) ((dd)->pdata->length_ofs)

#define SHA_REG_IRQSTATUS 0x118
#define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3)
#define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2)
#define SHA_REG_IRQSTATUS_INPUT_RDY (1 << 1)
#define SHA_REG_IRQSTATUS_OUTPUT_RDY (1 << 0)

#define SHA_REG_IRQENA 0x11C
#define SHA_REG_IRQENA_CTX_RDY (1 << 3)
#define SHA_REG_IRQENA_PARTHASH_RDY (1 << 2)
#define SHA_REG_IRQENA_INPUT_RDY (1 << 1)
#define SHA_REG_IRQENA_OUTPUT_RDY (1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL HZ

#define DEFAULT_AUTOSUSPEND_DELAY 1000

/* mostly device flags */
#define FLAGS_FINAL 1
#define FLAGS_DMA_ACTIVE 2
#define FLAGS_OUTPUT_READY 3
#define FLAGS_CPU 5
#define FLAGS_DMA_READY 6
#define FLAGS_AUTO_XOR 7
#define FLAGS_BE32_SHA1 8
#define FLAGS_SGS_COPIED 9
#define FLAGS_SGS_ALLOCED 10
#define FLAGS_HUGE 11

/* context flags */
#define FLAGS_FINUP 16

#define FLAGS_MODE_SHIFT 18
#define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA384 (SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA512 (SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)

#define FLAGS_HMAC 21
#define FLAGS_ERROR 22

#define OP_UPDATE 1
#define OP_FINAL 2

#define OMAP_ALIGN_MASK (sizeof(u32)-1)
#define OMAP_ALIGNED __attribute__((aligned(sizeof(u32))))

#define BUFLEN SHA512_BLOCK_SIZE
#define OMAP_SHA_DMA_THRESHOLD 256

#define OMAP_SHA_MAX_DMA_LEN (1024 * 2048)

struct omap_sham_dev;

struct omap_sham_reqctx {
	struct omap_sham_dev *dd;
	unsigned long flags;
	u8 op;

	u8 digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
	size_t digcnt;
	size_t bufcnt;
	size_t buflen;

	/* walk state */
	struct scatterlist *sg;
	struct scatterlist sgl[2];
	int offset;	/* offset in current sg */
	int sg_len;
	unsigned int total;	/* total request */

	u8 buffer[] OMAP_ALIGNED;
};

struct omap_sham_hmac_ctx {
	struct crypto_shash *shash;
	u8 ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
	u8 opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
};

struct omap_sham_ctx {
	struct crypto_engine_ctx enginectx;
	unsigned long flags;

	/* fallback stuff */
	struct crypto_shash *fallback;

	struct omap_sham_hmac_ctx base[];
};

#define OMAP_SHAM_QUEUE_LENGTH 10

struct omap_sham_algs_info {
	struct ahash_alg *algs_list;
	unsigned int size;
	unsigned int registered;
};

struct omap_sham_pdata {
	struct omap_sham_algs_info *algs_info;
	unsigned int algs_info_size;
	unsigned long flags;
	int digest_size;

	void (*copy_hash)(struct ahash_request *req, int out);
	void (*write_ctrl)(struct omap_sham_dev *dd, size_t length,
			   int final, int dma);
	void (*trigger)(struct omap_sham_dev *dd, size_t length);
	int (*poll_irq)(struct omap_sham_dev *dd);
	irqreturn_t (*intr_hdlr)(int irq, void *dev_id);

	u32 odigest_ofs;
	u32 idigest_ofs;
	u32 din_ofs;
	u32 digcnt_ofs;
	u32 rev_ofs;
	u32 mask_ofs;
	u32 sysstatus_ofs;
	u32 mode_ofs;
	u32 length_ofs;

	u32 major_mask;
	u32 major_shift;
	u32 minor_mask;
	u32 minor_shift;
};

struct omap_sham_dev {
	struct list_head list;
	unsigned long phys_base;
	struct device *dev;
	void __iomem *io_base;
	int irq;
	int err;
	struct dma_chan *dma_lch;
	struct tasklet_struct done_task;
	u8 polling_mode;
	u8 xmit_buf[BUFLEN] OMAP_ALIGNED;

	unsigned long flags;
	int fallback_sz;
	struct crypto_queue queue;
	struct ahash_request *req;
	struct crypto_engine *engine;

	const struct omap_sham_pdata *pdata;
};

struct omap_sham_drv {
	struct list_head dev_list;
	spinlock_t lock;
	unsigned long flags;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};

static int omap_sham_enqueue(struct ahash_request *req, unsigned int op);
static void omap_sham_finish_req(struct ahash_request *req, int err);

static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
				   u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}

static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}

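/*
 * Copy the running digest between the request context and the IDIGEST
 * registers: out != 0 reads the intermediate hash back from the IP,
 * out == 0 restores it before resuming a transfer.
 */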
static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
		else
			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
	}
}

static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int i;

	if (ctx->flags & BIT(FLAGS_HMAC)) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		u32 *opad = (u32 *)bctx->opad;

		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
			if (out)
				opad[i] = omap_sham_read(dd,
							 SHA_REG_ODIGEST(dd, i));
			else
				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
						opad[i]);
		}
	}

	omap_sham_copy_hash_omap2(req, out);
}

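/*
 * Copy the finished digest from the request context into req->result,
 * fixing up word endianness for the big-endian OMAP2 SHA1 variant.
 */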
static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *in = (u32 *)ctx->digest;
	u32 *hash = (u32 *)req->result;
	int i, d, big_endian = 0;

	if (!hash)
		return;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
		d = MD5_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA1:
		/* OMAP2 SHA1 is big endian */
		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
			big_endian = 1;
		d = SHA1_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA224:
		d = SHA224_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA256:
		d = SHA256_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA384:
		d = SHA384_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA512:
		d = SHA512_DIGEST_SIZE / sizeof(u32);
		break;
	default:
		d = 0;
	}

	if (big_endian)
		for (i = 0; i < d; i++)
			hash[i] = be32_to_cpup((__be32 *)in + i);
	else
		for (i = 0; i < d; i++)
			hash[i] = le32_to_cpup((__le32 *)in + i);
}

static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
				       int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
			     SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
	       SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}

static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
{
}

static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
}

static int get_block_size(struct omap_sham_reqctx *ctx)
{
	int d;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
	case FLAGS_MODE_SHA1:
		d = SHA1_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA224:
	case FLAGS_MODE_SHA256:
		d = SHA256_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA384:
	case FLAGS_MODE_SHA512:
		d = SHA512_BLOCK_SIZE;
		break;
	default:
		d = 0;
	}

	return d;
}

static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
			      u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_sham_write(dd, offset, *value);
}

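/*
 * Program the OMAP4-style MODE/MASK/IRQENA registers for the next transfer.
 * On the first block of an HMAC request the prepared key is loaded into the
 * ODIGEST/IDIGEST registers so the IP can derive the inner/outer pads itself
 * (SHA_REG_MODE_HMAC_KEY_PROC).
 */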
static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
				       int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);

	/*
	 * Setting ALGO_CONST only for the first iteration and
	 * CLOSE_HASH only for the last one. Note that flags mode bits
	 * correspond to algorithm encoding in mode register.
	 */
	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
	if (!ctx->digcnt) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		int bs, nr_dr;

		val |= SHA_REG_MODE_ALGO_CONSTANT;

		if (ctx->flags & BIT(FLAGS_HMAC)) {
			bs = get_block_size(ctx);
			nr_dr = bs / (2 * sizeof(u32));
			val |= SHA_REG_MODE_HMAC_KEY_PROC;
			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
					  (u32 *)bctx->ipad, nr_dr);
			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
					  (u32 *)bctx->ipad + nr_dr, nr_dr);
			ctx->digcnt += bs;
		}
	}

	if (final) {
		val |= SHA_REG_MODE_CLOSE_HASH;

		if (ctx->flags & BIT(FLAGS_HMAC))
			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
	}

	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
	       SHA_REG_MODE_HMAC_KEY_PROC;

	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
			     SHA_REG_MASK_IT_EN |
			     (dma ? SHA_REG_MASK_DMA_EN : 0),
			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
}

static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
{
	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
}

static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
			      SHA_REG_IRQSTATUS_INPUT_RDY);
}

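/*
 * PIO path: feed the data into the DIN registers one block at a time,
 * polling for the input-ready condition between blocks.
 */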
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
			      int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32, bs32, offset = 0;
	const u32 *buffer;
	int mlen;
	struct sg_mapping_iter mi;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %zd, length: %zd, final: %d\n",
		ctx->digcnt, length, final);

	dd->pdata->write_ctrl(dd, length, final, 0);
	dd->pdata->trigger(dd, length);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;
	ctx->total -= length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_CPU, &dd->flags);

	len32 = DIV_ROUND_UP(length, sizeof(u32));
	bs32 = get_block_size(ctx) / sizeof(u32);

	sg_miter_start(&mi, ctx->sg, ctx->sg_len,
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);

	mlen = 0;

	while (len32) {
		if (dd->pdata->poll_irq(dd))
			return -ETIMEDOUT;

		for (count = 0; count < min(len32, bs32); count++, offset++) {
			if (!mlen) {
				sg_miter_next(&mi);
				mlen = mi.length;
				if (!mlen) {
					pr_err("sg miter failure.\n");
					return -EINVAL;
				}
				offset = 0;
				buffer = mi.addr;
			}
			omap_sham_write(dd, SHA_REG_DIN(dd, count),
					buffer[offset]);
			mlen -= 4;
		}
		len32 -= min(len32, bs32);
	}

	sg_miter_stop(&mi);

	return -EINPROGRESS;
}

static void omap_sham_dma_callback(void *param)
{
	struct omap_sham_dev *dd = param;

	set_bit(FLAGS_DMA_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);
}

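/*
 * DMA path: map the scatterlist and hand it to the dmaengine channel, with
 * the DIN register block as the destination.
 */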
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
			      int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	int ret;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %zd, length: %zd, final: %d\n",
		ctx->digcnt, length, final);

	if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	memset(&cfg, 0, sizeof(cfg));

	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
	if (ret) {
		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
		return ret;
	}

	tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
				     DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tx) {
		dev_err(dd->dev, "prep_slave_sg failed\n");
		return -EINVAL;
	}

	tx->callback = omap_sham_dma_callback;
	tx->callback_param = dd;

	dd->pdata->write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;
	ctx->total -= length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	dmaengine_submit(tx);
	dma_async_issue_pending(dd->dma_lch);

	dd->pdata->trigger(dd, length);

	return -EINPROGRESS;
}

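/*
 * Build a driver-private scatterlist that prepends any bytes buffered in
 * xmit_buf and covers exactly new_len bytes of the source list.
 */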
static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
				   struct scatterlist *sg, int bs, int new_len)
{
	int n = sg_nents(sg);
	struct scatterlist *tmp;
	int offset = ctx->offset;

	ctx->total = new_len;

	if (ctx->bufcnt)
		n++;

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg)
		return -ENOMEM;

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;

	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
		new_len -= ctx->bufcnt;
	}

	while (sg && new_len) {
		int len = sg->length - offset;

		if (len <= 0) {
			offset -= sg->length;
			sg = sg_next(sg);
			continue;
		}

		if (new_len < len)
			len = new_len;

		if (len > 0) {
			new_len -= len;
			sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
			offset = 0;
			ctx->offset = 0;
			ctx->sg_len++;
			if (new_len <= 0)
				break;
			tmp = sg_next(tmp);
		}

		sg = sg_next(sg);
	}

	if (tmp)
		sg_mark_end(tmp);

	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);

	ctx->offset += new_len - ctx->bufcnt;
	ctx->bufcnt = 0;

	return 0;
}

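/*
 * Fallback for unaligned input: flatten the buffered bytes and the source
 * scatterlist into one freshly allocated, DMA-friendly buffer.
 */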
static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
			      struct scatterlist *sg, int bs,
			      unsigned int new_len)
{
	int pages;
	void *buf;

	pages = get_order(new_len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		pr_err("Couldn't allocate pages for unaligned cases.\n");
		return -ENOMEM;
	}

	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
				 min(new_len, ctx->total) - ctx->bufcnt, 0);
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, new_len);
	ctx->sg = ctx->sgl;
	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
	ctx->sg_len = 1;
	ctx->offset += new_len - ctx->bufcnt;
	ctx->bufcnt = 0;
	ctx->total = new_len;

	return 0;
}

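/*
 * Check whether the source scatterlist can be fed to the DMA engine as-is
 * (32-bit aligned entries in block-sized chunks); otherwise fall back to
 * copying either the data or the sg list.
 */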
static int omap_sham_align_sgs(struct scatterlist *sg,
			       int nbytes, int bs, bool final,
			       struct omap_sham_reqctx *rctx)
{
	int n = 0;
	bool aligned = true;
	bool list_ok = true;
	struct scatterlist *sg_tmp = sg;
	int new_len;
	int offset = rctx->offset;
	int bufcnt = rctx->bufcnt;

	if (!sg || !sg->length || !nbytes) {
		if (bufcnt) {
			bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
			sg_init_table(rctx->sgl, 1);
			sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
			rctx->sg = rctx->sgl;
			rctx->sg_len = 1;
		}

		return 0;
	}

	new_len = nbytes;

	if (offset)
		list_ok = false;

	if (final)
		new_len = DIV_ROUND_UP(new_len, bs) * bs;
	else
		new_len = (new_len - 1) / bs * bs;

	if (!new_len)
		return 0;

	if (nbytes != new_len)
		list_ok = false;

	while (nbytes > 0 && sg_tmp) {
		n++;

		if (bufcnt) {
			if (!IS_ALIGNED(bufcnt, bs)) {
				aligned = false;
				break;
			}
			nbytes -= bufcnt;
			bufcnt = 0;
			if (!nbytes)
				list_ok = false;

			continue;
		}

#ifdef CONFIG_ZONE_DMA
		if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
			aligned = false;
			break;
		}
#endif

		if (offset < sg_tmp->length) {
			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
				aligned = false;
				break;
			}

			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
				aligned = false;
				break;
			}
		}

		if (offset) {
			offset -= sg_tmp->length;
			if (offset < 0) {
				nbytes += offset;
				offset = 0;
			}
		} else {
			nbytes -= sg_tmp->length;
		}

		sg_tmp = sg_next(sg_tmp);

		if (nbytes < 0) {
			list_ok = false;
			break;
		}
	}

	if (new_len > OMAP_SHA_MAX_DMA_LEN) {
		new_len = OMAP_SHA_MAX_DMA_LEN;
		aligned = false;
	}

	if (!aligned)
		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
	else if (!list_ok)
		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);

	rctx->total = new_len;
	rctx->offset += new_len;
	rctx->sg_len = n;
	if (rctx->bufcnt) {
		sg_init_table(rctx->sgl, 2);
		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
		sg_chain(rctx->sgl, 2, sg);
		rctx->sg = rctx->sgl;
	} else {
		rctx->sg = sg;
	}

	return 0;
}

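/*
 * crypto_engine ->prepare_request() callback: work out how much data can be
 * processed in this pass, top up the block buffer, and stash any leftover
 * bytes for the next update.
 */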
static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
	int bs;
	int ret;
	unsigned int nbytes;
	bool final = rctx->flags & BIT(FLAGS_FINUP);
	bool update = rctx->op == OP_UPDATE;
	int hash_later;

	bs = get_block_size(rctx);

	nbytes = rctx->bufcnt;

	if (update)
		nbytes += req->nbytes - rctx->offset;

	dev_dbg(rctx->dd->dev,
		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%zd\n",
		__func__, nbytes, bs, rctx->total, rctx->offset,
		rctx->bufcnt);

	if (!nbytes)
		return 0;

	rctx->total = nbytes;

	if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
		int len = bs - rctx->bufcnt % bs;

		if (len > req->nbytes)
			len = req->nbytes;
		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
					 0, len, 0);
		rctx->bufcnt += len;
		rctx->offset = len;
	}

	if (rctx->bufcnt)
		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);

	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
	if (ret)
		return ret;

	hash_later = nbytes - rctx->total;
	if (hash_later < 0)
		hash_later = 0;

	if (hash_later && hash_later <= rctx->buflen) {
		scatterwalk_map_and_copy(rctx->buffer,
					 req->src,
					 req->nbytes - hash_later,
					 hash_later, 0);

		rctx->bufcnt = hash_later;
	} else {
		rctx->bufcnt = 0;
	}

	if (hash_later > rctx->buflen)
		set_bit(FLAGS_HUGE, &rctx->dd->flags);

	rctx->total = min(nbytes, rctx->total);

	return 0;
}

static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);

	clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	return 0;
}

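/*
 * Pick a hash device for this request: reuse the one already bound to the
 * context, otherwise rotate through the registered instances.
 */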
static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
{
	struct omap_sham_dev *dd;

	if (ctx->dd)
		return ctx->dd;

	spin_lock_bh(&sham.lock);
	dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
	list_move_tail(&dd->list, &sham.dev_list);
	ctx->dd = dd;
	spin_unlock_bh(&sham.lock);

	return dd;
}

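/*
 * ahash ->init(): select the algorithm mode from the digest size and, when
 * HMAC has to be done with a software XOR, preload the ipad block into the
 * request buffer.
 */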
static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd;
	int bs = 0;

	ctx->dd = NULL;

	dd = omap_sham_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_MD5;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA1;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA224;
		bs = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA256;
		bs = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA384;
		bs = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA512;
		bs = SHA512_BLOCK_SIZE;
		break;
	}

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->total = 0;
	ctx->offset = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
			struct omap_sham_hmac_ctx *bctx = tctx->base;

			memcpy(ctx->buffer, bctx->ipad, bs);
			ctx->bufcnt = bs;
		}

		ctx->flags |= BIT(FLAGS_HMAC);
	}

	return 0;
}

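/*
 * Push the prepared data to the IP: PIO when the total is below one block
 * or the fallback threshold, DMA otherwise.
 */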
static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;
	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
		     !(dd->flags & BIT(FLAGS_HUGE));

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d",
		ctx->total, ctx->digcnt, final);

	if (ctx->total < get_block_size(ctx) ||
	    ctx->total < dd->fallback_sz)
		ctx->flags |= BIT(FLAGS_CPU);

	if (ctx->flags & BIT(FLAGS_CPU))
		err = omap_sham_xmit_cpu(dd, ctx->total, final);
	else
		err = omap_sham_xmit_dma(dd, ctx->total, final);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt);

	return err;
}

static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (dd->flags & BIT(FLAGS_HUGE))
		return 0;

	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
		/*
		 * faster to handle last block with cpu or
		 * use cpu when dma is not present.
		 */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma(dd, ctx->total, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->total, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

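/*
 * crypto_engine ->do_one_request() callback: resume the device, restore the
 * intermediate digest if needed and kick off the update or final transfer.
 */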
static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err;
	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
		     !(dd->flags & BIT(FLAGS_HUGE));

	dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
		ctx->op, ctx->total, ctx->digcnt, final);

	err = pm_runtime_resume_and_get(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "failed to get sync: %d\n", err);
		return err;
	}

	dd->err = 0;
	dd->req = req;

	if (ctx->digcnt)
		dd->pdata->copy_hash(req, 0);

	if (ctx->op == OP_UPDATE)
		err = omap_sham_update_req(dd);
	else if (ctx->op == OP_FINAL)
		err = omap_sham_final_req(dd);

	if (err != -EINPROGRESS)
		omap_sham_finish_req(req, err);

	return 0;
}

static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;

	return crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, bctx->opad, bs) ?:
	       crypto_shash_finup(shash, req->result, ds, req->result);
}

static int omap_sham_finish(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt) {
		omap_sham_copy_ready_hash(req);
		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
		    !test_bit(FLAGS_AUTO_XOR, &dd->flags))
			err = omap_sham_finish_hmac(req);
	}

	dev_dbg(dd->dev, "digcnt: %zd, bufcnt: %zd\n", ctx->digcnt, ctx->bufcnt);

	return err;
}

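/*
 * Common completion path: release temporary scatterlists, read back the
 * digest, and either re-enqueue an oversized request or finalize it through
 * the crypto engine.
 */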
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
		free_pages((unsigned long)sg_virt(ctx->sg),
			   get_order(ctx->sg->length));

	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
		kfree(ctx->sg);

	ctx->sg = NULL;

	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED) |
		       BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
		       BIT(FLAGS_OUTPUT_READY));

	if (!err)
		dd->pdata->copy_hash(req, 1);

	if (dd->flags & BIT(FLAGS_HUGE)) {
		/* Re-enqueue the request */
		omap_sham_enqueue(req, ctx->op);
		return;
	}

	if (!err) {
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
		       BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);

	ctx->offset = 0;

	crypto_finalize_hash_request(dd->engine, req, err);
}

static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(dd->engine, req);
}

static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	ctx->op = op;

	return omap_sham_handle_queue(dd, req);
}

static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = omap_sham_find_dev(ctx);

	if (!req->nbytes)
		return 0;

	if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, req->nbytes, 0);
		ctx->bufcnt += req->nbytes;
		return 0;
	}

	if (dd->polling_mode)
		ctx->flags |= BIT(FLAGS_CPU);

	return omap_sham_enqueue(req, OP_UPDATE);
}

static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int offset = 0;

	/*
	 * If we are running HMAC on limited hardware support, skip
	 * the ipad in the beginning of the buffer if we are going for
	 * software fallback algorithm.
	 */
	if (test_bit(FLAGS_HMAC, &ctx->flags) &&
	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
		offset = get_block_size(ctx);

	return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset,
				       ctx->bufcnt - offset, req->result);
}

static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= BIT(FLAGS_FINUP);

	if (ctx->flags & BIT(FLAGS_ERROR))
		return 0; /* uncompleted hash is not needed */

	/*
	 * OMAP HW accel works only with buffers >= 9.
	 * HMAC is always >= 9 because ipad == block size.
	 * If buffersize is less than fallback_sz, we use fallback
	 * SW encoding, as using DMA + HW in this case doesn't provide
	 * any benefit.
	 */
	if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
		return omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	/* copy ready hash (+ finalize hmac) */
	return omap_sham_finish(req);
}

static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= BIT(FLAGS_FINUP);

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}

static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}

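/*
 * HMAC setkey: hash over-long keys down to the digest size, then either
 * precompute the ipad/opad XOR in software or keep the padded key for
 * hardware key processing (FLAGS_AUTO_XOR).
 */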
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
					      bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);

	if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
		memcpy(bctx->opad, bctx->ipad, bs);

		for (i = 0; i < bs; i++) {
			bctx->ipad[i] ^= HMAC_IPAD_VALUE;
			bctx->opad[i] ^= HMAC_OPAD_VALUE;
		}
	}

	return err;
}

static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' "
		       "could not be loaded.\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx) + BUFLEN);

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		tctx->flags |= BIT(FLAGS_HMAC);
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' "
			       "could not be loaded.\n", alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}
	}

	tctx->enginectx.op.do_one_request = omap_sham_hash_one_req;
	tctx->enginectx.op.prepare_request = omap_sham_prepare_request;
	tctx->enginectx.op.unprepare_request = NULL;

	return 0;
}

1359static int omap_sham_cra_init(struct crypto_tfm *tfm)
1360{
1361 return omap_sham_cra_init_alg(tfm, NULL);
1362}
1363
1364static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
1365{
1366 return omap_sham_cra_init_alg(tfm, "sha1");
1367}
1368
Mark A. Greerd20fb182012-12-21 10:04:09 -07001369static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
1370{
1371 return omap_sham_cra_init_alg(tfm, "sha224");
1372}
1373
1374static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
1375{
1376 return omap_sham_cra_init_alg(tfm, "sha256");
1377}
1378
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001379static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
1380{
1381 return omap_sham_cra_init_alg(tfm, "md5");
1382}
1383
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301384static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
1385{
1386 return omap_sham_cra_init_alg(tfm, "sha384");
1387}
1388
1389static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
1390{
1391 return omap_sham_cra_init_alg(tfm, "sha512");
1392}
1393
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001394static void omap_sham_cra_exit(struct crypto_tfm *tfm)
1395{
1396 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1397
1398 crypto_free_shash(tctx->fallback);
1399 tctx->fallback = NULL;
1400
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +03001401 if (tctx->flags & BIT(FLAGS_HMAC)) {
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001402 struct omap_sham_hmac_ctx *bctx = tctx->base;
1403 crypto_free_shash(bctx->shash);
1404 }
1405}
1406
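/*
 * export/import copy the whole request context plus any buffered input
 * (rctx->bufcnt bytes); this is why probe sets halg.statesize to
 * sizeof(struct omap_sham_reqctx) + BUFLEN.
 */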
Tero Kristo99a7fff2016-09-19 18:22:12 +03001407static int omap_sham_export(struct ahash_request *req, void *out)
1408{
Tero Kristoa84d3512016-09-19 18:22:18 +03001409 struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1410
1411 memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
1412
1413 return 0;
Tero Kristo99a7fff2016-09-19 18:22:12 +03001414}
1415
1416static int omap_sham_import(struct ahash_request *req, const void *in)
1417{
Tero Kristoa84d3512016-09-19 18:22:18 +03001418 struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1419 const struct omap_sham_reqctx *ctx_in = in;
1420
1421 memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);
1422
1423 return 0;
Tero Kristo99a7fff2016-09-19 18:22:12 +03001424}
1425
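/*
 * Usage sketch (hypothetical caller, not part of this driver): once the
 * algorithms in the tables below are registered, a kernel user reaches
 * the accelerated implementation through the generic ahash API, e.g.:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, sg, digest, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 * (error handling and scatterlist setup omitted). The crypto core
 * prefers "omap-sha1" over the generic C implementation because of its
 * higher cra_priority below.
 */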
Mark A. Greerd20fb182012-12-21 10:04:09 -07001426static struct ahash_alg algs_sha1_md5[] = {
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001427{
1428 .init = omap_sham_init,
1429 .update = omap_sham_update,
1430 .final = omap_sham_final,
1431 .finup = omap_sham_finup,
1432 .digest = omap_sham_digest,
1433 .halg.digestsize = SHA1_DIGEST_SIZE,
1434 .halg.base = {
1435 .cra_name = "sha1",
1436 .cra_driver_name = "omap-sha1",
Bin Liueb354782016-06-30 14:04:11 -05001437 .cra_priority = 400,
Eric Biggers6a38f622018-06-30 15:16:12 -07001438 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001439 CRYPTO_ALG_ASYNC |
1440 CRYPTO_ALG_NEED_FALLBACK,
1441 .cra_blocksize = SHA1_BLOCK_SIZE,
1442 .cra_ctxsize = sizeof(struct omap_sham_ctx),
Tero Kristo744e6862016-09-19 18:22:13 +03001443 .cra_alignmask = OMAP_ALIGN_MASK,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001444 .cra_module = THIS_MODULE,
1445 .cra_init = omap_sham_cra_init,
1446 .cra_exit = omap_sham_cra_exit,
1447 }
1448},
1449{
1450 .init = omap_sham_init,
1451 .update = omap_sham_update,
1452 .final = omap_sham_final,
1453 .finup = omap_sham_finup,
1454 .digest = omap_sham_digest,
1455 .halg.digestsize = MD5_DIGEST_SIZE,
1456 .halg.base = {
1457 .cra_name = "md5",
1458 .cra_driver_name = "omap-md5",
Bin Liueb354782016-06-30 14:04:11 -05001459 .cra_priority = 400,
Eric Biggers6a38f622018-06-30 15:16:12 -07001460 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001461 CRYPTO_ALG_ASYNC |
1462 CRYPTO_ALG_NEED_FALLBACK,
1463 .cra_blocksize = SHA1_BLOCK_SIZE,
1464 .cra_ctxsize = sizeof(struct omap_sham_ctx),
Dmitry Kasatkin798eed5d2010-11-19 16:04:26 +02001465 .cra_alignmask = OMAP_ALIGN_MASK,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001466 .cra_module = THIS_MODULE,
1467 .cra_init = omap_sham_cra_init,
1468 .cra_exit = omap_sham_cra_exit,
1469 }
1470},
1471{
1472 .init = omap_sham_init,
1473 .update = omap_sham_update,
1474 .final = omap_sham_final,
1475 .finup = omap_sham_finup,
1476 .digest = omap_sham_digest,
1477 .setkey = omap_sham_setkey,
1478 .halg.digestsize = SHA1_DIGEST_SIZE,
1479 .halg.base = {
1480 .cra_name = "hmac(sha1)",
1481 .cra_driver_name = "omap-hmac-sha1",
Bin Liueb354782016-06-30 14:04:11 -05001482 .cra_priority = 400,
Eric Biggers6a38f622018-06-30 15:16:12 -07001483 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001484 CRYPTO_ALG_ASYNC |
1485 CRYPTO_ALG_NEED_FALLBACK,
1486 .cra_blocksize = SHA1_BLOCK_SIZE,
1487 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1488 sizeof(struct omap_sham_hmac_ctx),
Dmitry Kasatkin798eed5d2010-11-19 16:04:26 +02001489 .cra_alignmask = OMAP_ALIGN_MASK,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001490 .cra_module = THIS_MODULE,
1491 .cra_init = omap_sham_cra_sha1_init,
1492 .cra_exit = omap_sham_cra_exit,
1493 }
1494},
1495{
1496 .init = omap_sham_init,
1497 .update = omap_sham_update,
1498 .final = omap_sham_final,
1499 .finup = omap_sham_finup,
1500 .digest = omap_sham_digest,
1501 .setkey = omap_sham_setkey,
1502 .halg.digestsize = MD5_DIGEST_SIZE,
1503 .halg.base = {
1504 .cra_name = "hmac(md5)",
1505 .cra_driver_name = "omap-hmac-md5",
Bin Liueb354782016-06-30 14:04:11 -05001506 .cra_priority = 400,
Eric Biggers6a38f622018-06-30 15:16:12 -07001507 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001508 CRYPTO_ALG_ASYNC |
1509 CRYPTO_ALG_NEED_FALLBACK,
1510 .cra_blocksize = SHA1_BLOCK_SIZE,
1511 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1512 sizeof(struct omap_sham_hmac_ctx),
Dmitry Kasatkin798eed5d2010-11-19 16:04:26 +02001513 .cra_alignmask = OMAP_ALIGN_MASK,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001514 .cra_module = THIS_MODULE,
1515 .cra_init = omap_sham_cra_md5_init,
1516 .cra_exit = omap_sham_cra_exit,
1517 }
1518}
1519};
1520
Mark A. Greerd20fb182012-12-21 10:04:09 -07001521/* OMAP4 has some algs in addition to what OMAP2 has */
1522static struct ahash_alg algs_sha224_sha256[] = {
1523{
1524 .init = omap_sham_init,
1525 .update = omap_sham_update,
1526 .final = omap_sham_final,
1527 .finup = omap_sham_finup,
1528 .digest = omap_sham_digest,
1529 .halg.digestsize = SHA224_DIGEST_SIZE,
1530 .halg.base = {
1531 .cra_name = "sha224",
1532 .cra_driver_name = "omap-sha224",
Bin Liueb354782016-06-30 14:04:11 -05001533 .cra_priority = 400,
Tero Kristo8dc43632020-05-27 15:24:24 +03001534 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1535 CRYPTO_ALG_ASYNC |
Mark A. Greerd20fb182012-12-21 10:04:09 -07001536 CRYPTO_ALG_NEED_FALLBACK,
1537 .cra_blocksize = SHA224_BLOCK_SIZE,
1538 .cra_ctxsize = sizeof(struct omap_sham_ctx),
Tero Kristo744e6862016-09-19 18:22:13 +03001539 .cra_alignmask = OMAP_ALIGN_MASK,
Mark A. Greerd20fb182012-12-21 10:04:09 -07001540 .cra_module = THIS_MODULE,
1541 .cra_init = omap_sham_cra_init,
1542 .cra_exit = omap_sham_cra_exit,
1543 }
1544},
1545{
1546 .init = omap_sham_init,
1547 .update = omap_sham_update,
1548 .final = omap_sham_final,
1549 .finup = omap_sham_finup,
1550 .digest = omap_sham_digest,
1551 .halg.digestsize = SHA256_DIGEST_SIZE,
1552 .halg.base = {
1553 .cra_name = "sha256",
1554 .cra_driver_name = "omap-sha256",
Bin Liueb354782016-06-30 14:04:11 -05001555 .cra_priority = 400,
Tero Kristo8dc43632020-05-27 15:24:24 +03001556 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1557 CRYPTO_ALG_ASYNC |
Mark A. Greerd20fb182012-12-21 10:04:09 -07001558 CRYPTO_ALG_NEED_FALLBACK,
1559 .cra_blocksize = SHA256_BLOCK_SIZE,
1560 .cra_ctxsize = sizeof(struct omap_sham_ctx),
Tero Kristo744e6862016-09-19 18:22:13 +03001561 .cra_alignmask = OMAP_ALIGN_MASK,
Mark A. Greerd20fb182012-12-21 10:04:09 -07001562 .cra_module = THIS_MODULE,
1563 .cra_init = omap_sham_cra_init,
1564 .cra_exit = omap_sham_cra_exit,
1565 }
1566},
1567{
1568 .init = omap_sham_init,
1569 .update = omap_sham_update,
1570 .final = omap_sham_final,
1571 .finup = omap_sham_finup,
1572 .digest = omap_sham_digest,
1573 .setkey = omap_sham_setkey,
1574 .halg.digestsize = SHA224_DIGEST_SIZE,
1575 .halg.base = {
1576 .cra_name = "hmac(sha224)",
1577 .cra_driver_name = "omap-hmac-sha224",
Bin Liueb354782016-06-30 14:04:11 -05001578 .cra_priority = 400,
Tero Kristo8dc43632020-05-27 15:24:24 +03001579 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1580 CRYPTO_ALG_ASYNC |
Mark A. Greerd20fb182012-12-21 10:04:09 -07001581 CRYPTO_ALG_NEED_FALLBACK,
1582 .cra_blocksize = SHA224_BLOCK_SIZE,
1583 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1584 sizeof(struct omap_sham_hmac_ctx),
1585 .cra_alignmask = OMAP_ALIGN_MASK,
1586 .cra_module = THIS_MODULE,
1587 .cra_init = omap_sham_cra_sha224_init,
1588 .cra_exit = omap_sham_cra_exit,
1589 }
1590},
1591{
1592 .init = omap_sham_init,
1593 .update = omap_sham_update,
1594 .final = omap_sham_final,
1595 .finup = omap_sham_finup,
1596 .digest = omap_sham_digest,
1597 .setkey = omap_sham_setkey,
1598 .halg.digestsize = SHA256_DIGEST_SIZE,
1599 .halg.base = {
1600 .cra_name = "hmac(sha256)",
1601 .cra_driver_name = "omap-hmac-sha256",
Bin Liueb354782016-06-30 14:04:11 -05001602 .cra_priority = 400,
Tero Kristo8dc43632020-05-27 15:24:24 +03001603 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1604 CRYPTO_ALG_ASYNC |
Mark A. Greerd20fb182012-12-21 10:04:09 -07001605 CRYPTO_ALG_NEED_FALLBACK,
1606 .cra_blocksize = SHA256_BLOCK_SIZE,
1607 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1608 sizeof(struct omap_sham_hmac_ctx),
1609 .cra_alignmask = OMAP_ALIGN_MASK,
1610 .cra_module = THIS_MODULE,
1611 .cra_init = omap_sham_cra_sha256_init,
1612 .cra_exit = omap_sham_cra_exit,
1613 }
1614},
1615};
1616
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301617static struct ahash_alg algs_sha384_sha512[] = {
1618{
1619 .init = omap_sham_init,
1620 .update = omap_sham_update,
1621 .final = omap_sham_final,
1622 .finup = omap_sham_finup,
1623 .digest = omap_sham_digest,
1624 .halg.digestsize = SHA384_DIGEST_SIZE,
1625 .halg.base = {
1626 .cra_name = "sha384",
1627 .cra_driver_name = "omap-sha384",
Bin Liueb354782016-06-30 14:04:11 -05001628 .cra_priority = 400,
Tero Kristo8dc43632020-05-27 15:24:24 +03001629 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1630 CRYPTO_ALG_ASYNC |
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301631 CRYPTO_ALG_NEED_FALLBACK,
1632 .cra_blocksize = SHA384_BLOCK_SIZE,
1633 .cra_ctxsize = sizeof(struct omap_sham_ctx),
Tero Kristo744e6862016-09-19 18:22:13 +03001634 .cra_alignmask = OMAP_ALIGN_MASK,
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301635 .cra_module = THIS_MODULE,
1636 .cra_init = omap_sham_cra_init,
1637 .cra_exit = omap_sham_cra_exit,
1638 }
1639},
1640{
1641 .init = omap_sham_init,
1642 .update = omap_sham_update,
1643 .final = omap_sham_final,
1644 .finup = omap_sham_finup,
1645 .digest = omap_sham_digest,
1646 .halg.digestsize = SHA512_DIGEST_SIZE,
1647 .halg.base = {
1648 .cra_name = "sha512",
1649 .cra_driver_name = "omap-sha512",
Bin Liueb354782016-06-30 14:04:11 -05001650 .cra_priority = 400,
Tero Kristo8dc43632020-05-27 15:24:24 +03001651 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1652 CRYPTO_ALG_ASYNC |
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301653 CRYPTO_ALG_NEED_FALLBACK,
1654 .cra_blocksize = SHA512_BLOCK_SIZE,
1655 .cra_ctxsize = sizeof(struct omap_sham_ctx),
Tero Kristo744e6862016-09-19 18:22:13 +03001656 .cra_alignmask = OMAP_ALIGN_MASK,
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301657 .cra_module = THIS_MODULE,
1658 .cra_init = omap_sham_cra_init,
1659 .cra_exit = omap_sham_cra_exit,
1660 }
1661},
1662{
1663 .init = omap_sham_init,
1664 .update = omap_sham_update,
1665 .final = omap_sham_final,
1666 .finup = omap_sham_finup,
1667 .digest = omap_sham_digest,
1668 .setkey = omap_sham_setkey,
1669 .halg.digestsize = SHA384_DIGEST_SIZE,
1670 .halg.base = {
1671 .cra_name = "hmac(sha384)",
1672 .cra_driver_name = "omap-hmac-sha384",
Bin Liueb354782016-06-30 14:04:11 -05001673 .cra_priority = 400,
Tero Kristo8dc43632020-05-27 15:24:24 +03001674 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1675 CRYPTO_ALG_ASYNC |
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301676 CRYPTO_ALG_NEED_FALLBACK,
1677 .cra_blocksize = SHA384_BLOCK_SIZE,
1678 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1679 sizeof(struct omap_sham_hmac_ctx),
1680 .cra_alignmask = OMAP_ALIGN_MASK,
1681 .cra_module = THIS_MODULE,
1682 .cra_init = omap_sham_cra_sha384_init,
1683 .cra_exit = omap_sham_cra_exit,
1684 }
1685},
1686{
1687 .init = omap_sham_init,
1688 .update = omap_sham_update,
1689 .final = omap_sham_final,
1690 .finup = omap_sham_finup,
1691 .digest = omap_sham_digest,
1692 .setkey = omap_sham_setkey,
1693 .halg.digestsize = SHA512_DIGEST_SIZE,
1694 .halg.base = {
1695 .cra_name = "hmac(sha512)",
1696 .cra_driver_name = "omap-hmac-sha512",
Bin Liueb354782016-06-30 14:04:11 -05001697 .cra_priority = 400,
Tero Kristo8dc43632020-05-27 15:24:24 +03001698 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1699 CRYPTO_ALG_ASYNC |
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301700 CRYPTO_ALG_NEED_FALLBACK,
1701 .cra_blocksize = SHA512_BLOCK_SIZE,
1702 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1703 sizeof(struct omap_sham_hmac_ctx),
1704 .cra_alignmask = OMAP_ALIGN_MASK,
1705 .cra_module = THIS_MODULE,
1706 .cra_init = omap_sham_cra_sha512_init,
1707 .cra_exit = omap_sham_cra_exit,
1708 }
1709},
1710};
1711
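/*
 * Completion tasklet: in CPU/polling mode a set OUTPUT_READY bit means
 * the digest is ready; in DMA mode the transfer is stopped first and the
 * request is only finished once both DMA_READY and OUTPUT_READY have
 * been seen. Any error recorded in dd->err is passed on to
 * omap_sham_finish_req().
 */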
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001712static void omap_sham_done_task(unsigned long data)
1713{
1714 struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
Dmitry Kasatkin6c63db82011-06-02 21:10:10 +03001715 int err = 0;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001716
Tero Kristo462519f2019-11-05 16:00:50 +02001717 dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
1718
Dmitry Kasatkin6c63db82011-06-02 21:10:10 +03001719 if (test_bit(FLAGS_CPU, &dd->flags)) {
Tero Kristo8043bb12016-09-19 18:22:17 +03001720 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
1721 goto finish;
Dmitry Kasatkin6c63db82011-06-02 21:10:10 +03001722 } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
Tony Lindgrenfe281402021-07-27 13:23:34 +03001723 if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
Dmitry Kasatkin6c63db82011-06-02 21:10:10 +03001724 omap_sham_update_dma_stop(dd);
1725 if (dd->err) {
1726 err = dd->err;
1727 goto finish;
1728 }
1729 }
1730 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1731 /* hash or semi-hash ready */
1732 clear_bit(FLAGS_DMA_READY, &dd->flags);
Krzysztof Kozlowski17f5b192018-03-01 21:50:11 +01001733 goto finish;
Dmitry Kasatkin6c63db82011-06-02 21:10:10 +03001734 }
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001735 }
1736
Dmitry Kasatkin6c63db82011-06-02 21:10:10 +03001737 return;
Dmitry Kasatkin3e133c82010-11-19 16:04:24 +02001738
Dmitry Kasatkin6c63db82011-06-02 21:10:10 +03001739finish:
1740 dev_dbg(dd->dev, "update done: err: %d\n", err);
1741	/* finish current request */
1742 omap_sham_finish_req(dd->req, err);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001743}
1744
Mark A. Greer0d373d62012-12-21 10:04:08 -07001745static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
1746{
Tero Kristo133c3d42020-09-07 10:56:10 +03001747 set_bit(FLAGS_OUTPUT_READY, &dd->flags);
1748 tasklet_schedule(&dd->done_task);
Mark A. Greer0d373d62012-12-21 10:04:08 -07001749
1750 return IRQ_HANDLED;
1751}
1752
1753static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001754{
1755 struct omap_sham_dev *dd = dev_id;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001756
Dmitry Kasatkined3ea9a82011-06-02 21:10:07 +03001757 if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001758 /* final -> allow device to go to power-saving mode */
1759 omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
1760
1761 omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
1762 SHA_REG_CTRL_OUTPUT_READY);
1763 omap_sham_read(dd, SHA_REG_CTRL);
1764
Mark A. Greer0d373d62012-12-21 10:04:08 -07001765 return omap_sham_irq_common(dd);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001766}
1767
Mark A. Greer0d373d62012-12-21 10:04:08 -07001768static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001769{
Mark A. Greer0d373d62012-12-21 10:04:08 -07001770 struct omap_sham_dev *dd = dev_id;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001771
Mark A. Greer0d373d62012-12-21 10:04:08 -07001772 omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);
Dmitry Kasatkin3e133c82010-11-19 16:04:24 +02001773
Mark A. Greer0d373d62012-12-21 10:04:08 -07001774 return omap_sham_irq_common(dd);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001775}
1776
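/*
 * Per-SoC data: each omap_sham_pdata below describes the register
 * layout, feature flags and supported algorithm tables of one generation
 * of the SHA/MD5 module (OMAP2/3, OMAP4, OMAP5).
 */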
Mark A. Greerd20fb182012-12-21 10:04:09 -07001777static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
1778 {
1779 .algs_list = algs_sha1_md5,
1780 .size = ARRAY_SIZE(algs_sha1_md5),
1781 },
1782};
1783
Mark A. Greer0d373d62012-12-21 10:04:08 -07001784static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
Mark A. Greerd20fb182012-12-21 10:04:09 -07001785 .algs_info = omap_sham_algs_info_omap2,
1786 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap2),
Mark A. Greer0d373d62012-12-21 10:04:08 -07001787 .flags = BIT(FLAGS_BE32_SHA1),
1788 .digest_size = SHA1_DIGEST_SIZE,
1789 .copy_hash = omap_sham_copy_hash_omap2,
1790 .write_ctrl = omap_sham_write_ctrl_omap2,
1791 .trigger = omap_sham_trigger_omap2,
1792 .poll_irq = omap_sham_poll_irq_omap2,
1793 .intr_hdlr = omap_sham_irq_omap2,
1794 .idigest_ofs = 0x00,
1795 .din_ofs = 0x1c,
1796 .digcnt_ofs = 0x14,
1797 .rev_ofs = 0x5c,
1798 .mask_ofs = 0x60,
1799 .sysstatus_ofs = 0x64,
1800 .major_mask = 0xf0,
1801 .major_shift = 4,
1802 .minor_mask = 0x0f,
1803 .minor_shift = 0,
1804};
1805
Mark A. Greer03feec92012-12-21 10:04:06 -07001806#ifdef CONFIG_OF
Mark A. Greerd20fb182012-12-21 10:04:09 -07001807static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
1808 {
1809 .algs_list = algs_sha1_md5,
1810 .size = ARRAY_SIZE(algs_sha1_md5),
1811 },
1812 {
1813 .algs_list = algs_sha224_sha256,
1814 .size = ARRAY_SIZE(algs_sha224_sha256),
1815 },
1816};
1817
Mark A. Greer0d373d62012-12-21 10:04:08 -07001818static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
Mark A. Greerd20fb182012-12-21 10:04:09 -07001819 .algs_info = omap_sham_algs_info_omap4,
1820 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap4),
Mark A. Greer0d373d62012-12-21 10:04:08 -07001821 .flags = BIT(FLAGS_AUTO_XOR),
1822 .digest_size = SHA256_DIGEST_SIZE,
1823 .copy_hash = omap_sham_copy_hash_omap4,
1824 .write_ctrl = omap_sham_write_ctrl_omap4,
1825 .trigger = omap_sham_trigger_omap4,
1826 .poll_irq = omap_sham_poll_irq_omap4,
1827 .intr_hdlr = omap_sham_irq_omap4,
1828 .idigest_ofs = 0x020,
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301829 .odigest_ofs = 0x0,
Mark A. Greer0d373d62012-12-21 10:04:08 -07001830 .din_ofs = 0x080,
1831 .digcnt_ofs = 0x040,
1832 .rev_ofs = 0x100,
1833 .mask_ofs = 0x110,
1834 .sysstatus_ofs = 0x114,
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301835 .mode_ofs = 0x44,
1836 .length_ofs = 0x48,
Mark A. Greer0d373d62012-12-21 10:04:08 -07001837 .major_mask = 0x0700,
1838 .major_shift = 8,
1839 .minor_mask = 0x003f,
1840 .minor_shift = 0,
1841};
1842
Lokesh Vutla7d7c7042013-07-26 12:29:15 +05301843static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
1844 {
1845 .algs_list = algs_sha1_md5,
1846 .size = ARRAY_SIZE(algs_sha1_md5),
1847 },
1848 {
1849 .algs_list = algs_sha224_sha256,
1850 .size = ARRAY_SIZE(algs_sha224_sha256),
1851 },
1852 {
1853 .algs_list = algs_sha384_sha512,
1854 .size = ARRAY_SIZE(algs_sha384_sha512),
1855 },
1856};
1857
1858static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
1859 .algs_info = omap_sham_algs_info_omap5,
1860 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap5),
1861 .flags = BIT(FLAGS_AUTO_XOR),
1862 .digest_size = SHA512_DIGEST_SIZE,
1863 .copy_hash = omap_sham_copy_hash_omap4,
1864 .write_ctrl = omap_sham_write_ctrl_omap4,
1865 .trigger = omap_sham_trigger_omap4,
1866 .poll_irq = omap_sham_poll_irq_omap4,
1867 .intr_hdlr = omap_sham_irq_omap4,
1868 .idigest_ofs = 0x240,
1869 .odigest_ofs = 0x200,
1870 .din_ofs = 0x080,
1871 .digcnt_ofs = 0x280,
1872 .rev_ofs = 0x100,
1873 .mask_ofs = 0x110,
1874 .sysstatus_ofs = 0x114,
1875 .mode_ofs = 0x284,
1876 .length_ofs = 0x288,
1877 .major_mask = 0x0700,
1878 .major_shift = 8,
1879 .minor_mask = 0x003f,
1880 .minor_shift = 0,
1881};
1882
Mark A. Greer03feec92012-12-21 10:04:06 -07001883static const struct of_device_id omap_sham_of_match[] = {
1884 {
1885 .compatible = "ti,omap2-sham",
Mark A. Greer0d373d62012-12-21 10:04:08 -07001886 .data = &omap_sham_pdata_omap2,
1887 },
1888 {
Pali Roháreddca852015-02-26 14:49:53 +01001889 .compatible = "ti,omap3-sham",
1890 .data = &omap_sham_pdata_omap2,
1891 },
1892 {
Mark A. Greer0d373d62012-12-21 10:04:08 -07001893 .compatible = "ti,omap4-sham",
1894 .data = &omap_sham_pdata_omap4,
Mark A. Greer03feec92012-12-21 10:04:06 -07001895 },
Lokesh Vutla7d7c7042013-07-26 12:29:15 +05301896 {
1897 .compatible = "ti,omap5-sham",
1898 .data = &omap_sham_pdata_omap5,
1899 },
Mark A. Greer03feec92012-12-21 10:04:06 -07001900 {},
1901};
1902MODULE_DEVICE_TABLE(of, omap_sham_of_match);
1903
1904static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1905 struct device *dev, struct resource *res)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001906{
Mark A. Greer03feec92012-12-21 10:04:06 -07001907 struct device_node *node = dev->of_node;
Mark A. Greer03feec92012-12-21 10:04:06 -07001908 int err = 0;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001909
Corentin LABBE7d5569312017-09-20 20:42:48 +02001910 dd->pdata = of_device_get_match_data(dev);
1911 if (!dd->pdata) {
Mark A. Greer03feec92012-12-21 10:04:06 -07001912 dev_err(dev, "no compatible OF match\n");
1913 err = -EINVAL;
1914 goto err;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001915 }
Samu Onkalo584db6a2010-09-03 19:20:19 +08001916
Mark A. Greer03feec92012-12-21 10:04:06 -07001917 err = of_address_to_resource(node, 0, res);
1918 if (err < 0) {
1919 dev_err(dev, "can't translate OF node address\n");
1920 err = -EINVAL;
1921 goto err;
1922 }
1923
Thierry Redingf7578492013-09-18 15:24:44 +02001924 dd->irq = irq_of_parse_and_map(node, 0);
Mark A. Greer03feec92012-12-21 10:04:06 -07001925 if (!dd->irq) {
1926 dev_err(dev, "can't translate OF irq value\n");
1927 err = -EINVAL;
1928 goto err;
1929 }
1930
Mark A. Greer03feec92012-12-21 10:04:06 -07001931err:
1932 return err;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001933}
Mark A. Greer03feec92012-12-21 10:04:06 -07001934#else
Mark A. Greerc3c3b322013-01-15 13:53:02 -07001935static const struct of_device_id omap_sham_of_match[] = {
1936 {},
1937};
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001938
Mark A. Greerc3c3b322013-01-15 13:53:02 -07001939static int omap_sham_get_res_of(struct omap_sham_dev *dd,
Mark A. Greer03feec92012-12-21 10:04:06 -07001940 struct device *dev, struct resource *res)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001941{
Mark A. Greer03feec92012-12-21 10:04:06 -07001942 return -EINVAL;
1943}
1944#endif
1945
1946static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
1947 struct platform_device *pdev, struct resource *res)
1948{
1949 struct device *dev = &pdev->dev;
1950 struct resource *r;
1951 int err = 0;
1952
1953 /* Get the base address */
1954 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1955 if (!r) {
1956 dev_err(dev, "no MEM resource info\n");
1957 err = -ENODEV;
1958 goto err;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001959 }
Mark A. Greer03feec92012-12-21 10:04:06 -07001960 memcpy(res, r, sizeof(*res));
1961
1962 /* Get the IRQ */
1963 dd->irq = platform_get_irq(pdev, 0);
1964 if (dd->irq < 0) {
Mark A. Greer03feec92012-12-21 10:04:06 -07001965 err = dd->irq;
1966 goto err;
1967 }
1968
Mark A. Greer0d373d62012-12-21 10:04:08 -07001969 /* Only OMAP2/3 can be non-DT */
1970 dd->pdata = &omap_sham_pdata_omap2;
1971
Mark A. Greer03feec92012-12-21 10:04:06 -07001972err:
1973 return err;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001974}
1975
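/*
 * Runtime tunables, exposed as sysfs attributes on the platform device
 * (path is device-specific), e.g.:
 *
 *	echo 1024 > /sys/bus/platform/devices/<sham-device>/fallback
 *
 * "fallback" is the size in bytes below which a request is finished with
 * the software fallback instead of the accelerator; "queue_len" resizes
 * the internal request queue.
 */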
Tero Kristoc9af5992018-02-27 15:30:36 +02001976static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
1977 char *buf)
1978{
1979 struct omap_sham_dev *dd = dev_get_drvdata(dev);
1980
1981 return sprintf(buf, "%d\n", dd->fallback_sz);
1982}
1983
1984static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
1985 const char *buf, size_t size)
1986{
1987 struct omap_sham_dev *dd = dev_get_drvdata(dev);
1988 ssize_t status;
1989 long value;
1990
1991 status = kstrtol(buf, 0, &value);
1992 if (status)
1993 return status;
1994
1995	/* HW accelerator only works with buffers larger than 9 bytes */
1996 if (value < 9) {
1997 dev_err(dev, "minimum fallback size 9\n");
1998 return -EINVAL;
1999 }
2000
2001 dd->fallback_sz = value;
2002
2003 return size;
2004}
2005
Tero Kristo62f7c702018-02-27 15:30:37 +02002006static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
2007 char *buf)
2008{
2009 struct omap_sham_dev *dd = dev_get_drvdata(dev);
2010
2011 return sprintf(buf, "%d\n", dd->queue.max_qlen);
2012}
2013
2014static ssize_t queue_len_store(struct device *dev,
2015 struct device_attribute *attr, const char *buf,
2016 size_t size)
2017{
2018 struct omap_sham_dev *dd = dev_get_drvdata(dev);
2019 ssize_t status;
2020 long value;
Tero Kristo62f7c702018-02-27 15:30:37 +02002021
2022 status = kstrtol(buf, 0, &value);
2023 if (status)
2024 return status;
2025
2026 if (value < 1)
2027 return -EINVAL;
2028
2029 /*
2030	 * Changing the queue size on the fly is safe: if the new size is
2031	 * smaller than the current one, the queue simply stops accepting
2032	 * new entries until it has shrunk enough.
2033 */
Tero Kristo62f7c702018-02-27 15:30:37 +02002034 dd->queue.max_qlen = value;
Tero Kristo62f7c702018-02-27 15:30:37 +02002035
2036 return size;
2037}
2038
2039static DEVICE_ATTR_RW(queue_len);
Tero Kristoc9af5992018-02-27 15:30:36 +02002040static DEVICE_ATTR_RW(fallback);
2041
2042static struct attribute *omap_sham_attrs[] = {
Tero Kristo62f7c702018-02-27 15:30:37 +02002043 &dev_attr_queue_len.attr,
Tero Kristoc9af5992018-02-27 15:30:36 +02002044 &dev_attr_fallback.attr,
2045 NULL,
2046};
2047
2048static struct attribute_group omap_sham_attr_group = {
2049 .attrs = omap_sham_attrs,
2050};
2051
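/*
 * Probe: map the module from DT or legacy platform resources, request
 * the IRQ and an optional DMA channel (falling back to polling mode),
 * read the revision register, start a crypto_engine queue and register
 * the ahash algorithms listed in the per-SoC pdata, plus the sysfs
 * attribute group.
 */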
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08002052static int omap_sham_probe(struct platform_device *pdev)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002053{
2054 struct omap_sham_dev *dd;
2055 struct device *dev = &pdev->dev;
Mark A. Greer03feec92012-12-21 10:04:06 -07002056 struct resource res;
Mark A. Greerdfd061d2012-12-21 10:04:04 -07002057 dma_cap_mask_t mask;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002058 int err, i, j;
Mark A. Greer0d373d62012-12-21 10:04:08 -07002059 u32 rev;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002060
Lokesh Vutla7a7e4b72013-07-26 12:29:17 +05302061 dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002062 if (dd == NULL) {
2063 dev_err(dev, "unable to alloc data struct.\n");
2064 err = -ENOMEM;
2065 goto data_err;
2066 }
2067 dd->dev = dev;
2068 platform_set_drvdata(pdev, dd);
2069
2070 INIT_LIST_HEAD(&dd->list);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002071 tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002072 crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
2073
Mark A. Greer03feec92012-12-21 10:04:06 -07002074 err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
2075 omap_sham_get_res_pdev(dd, pdev, &res);
2076 if (err)
Lokesh Vutla7a7e4b72013-07-26 12:29:17 +05302077 goto data_err;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002078
Laurent Navet30862282013-05-02 14:00:38 +02002079 dd->io_base = devm_ioremap_resource(dev, &res);
2080 if (IS_ERR(dd->io_base)) {
2081 err = PTR_ERR(dd->io_base);
Lokesh Vutla7a7e4b72013-07-26 12:29:17 +05302082 goto data_err;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002083 }
Mark A. Greer03feec92012-12-21 10:04:06 -07002084 dd->phys_base = res.start;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002085
Lokesh Vutla0de9c382013-07-26 12:29:16 +05302086 err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
2087 IRQF_TRIGGER_NONE, dev_name(dev), dd);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002088 if (err) {
Lokesh Vutla0de9c382013-07-26 12:29:16 +05302089 dev_err(dev, "unable to request irq %d, err = %d\n",
2090 dd->irq, err);
Lokesh Vutla7a7e4b72013-07-26 12:29:17 +05302091 goto data_err;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002092 }
2093
Mark A. Greerdfd061d2012-12-21 10:04:04 -07002094 dma_cap_zero(mask);
2095 dma_cap_set(DMA_SLAVE, mask);
2096
Peter Ujfalusidbe24622016-04-29 16:03:41 +03002097 dd->dma_lch = dma_request_chan(dev, "rx");
2098 if (IS_ERR(dd->dma_lch)) {
2099 err = PTR_ERR(dd->dma_lch);
2100 if (err == -EPROBE_DEFER)
2101 goto data_err;
2102
Lokesh Vutlab8411cc2013-08-20 20:32:34 +05302103 dd->polling_mode = 1;
2104 dev_dbg(dev, "using polling mode instead of dma\n");
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002105 }
2106
Mark A. Greer0d373d62012-12-21 10:04:08 -07002107 dd->flags |= dd->pdata->flags;
Tero Kristo281c3772020-05-27 15:24:29 +03002108 sham.flags |= dd->pdata->flags;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002109
Tero Kristoe93f7672016-06-22 16:23:34 +03002110 pm_runtime_use_autosuspend(dev);
2111 pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
2112
Tero Kristoc9af5992018-02-27 15:30:36 +02002113 dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
2114
Mark A. Greerb359f032012-12-21 10:04:02 -07002115 pm_runtime_enable(dev);
Pali Rohár604c3102015-03-08 11:01:01 +01002116
2117 err = pm_runtime_get_sync(dev);
2118 if (err < 0) {
2119 dev_err(dev, "failed to get sync: %d\n", err);
2120 goto err_pm;
2121 }
2122
Mark A. Greer0d373d62012-12-21 10:04:08 -07002123 rev = omap_sham_read(dd, SHA_REG_REV(dd));
2124 pm_runtime_put_sync(&pdev->dev);
Mark A. Greerb359f032012-12-21 10:04:02 -07002125
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002126 dev_info(dev, "hw accel on OMAP rev %u.%u\n",
Mark A. Greer0d373d62012-12-21 10:04:08 -07002127 (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
2128 (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002129
Ben Hutchingsfe4d5572021-08-11 02:06:09 +02002130 spin_lock_bh(&sham.lock);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002131 list_add_tail(&dd->list, &sham.dev_list);
Ben Hutchingsfe4d5572021-08-11 02:06:09 +02002132 spin_unlock_bh(&sham.lock);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002133
Tero Kristo133c3d42020-09-07 10:56:10 +03002134 dd->engine = crypto_engine_alloc_init(dev, 1);
2135 if (!dd->engine) {
2136 err = -ENOMEM;
2137 goto err_engine;
2138 }
2139
2140 err = crypto_engine_start(dd->engine);
2141 if (err)
2142 goto err_engine_start;
2143
Mark A. Greerd20fb182012-12-21 10:04:09 -07002144 for (i = 0; i < dd->pdata->algs_info_size; i++) {
Tero Kristo281c3772020-05-27 15:24:29 +03002145 if (dd->pdata->algs_info[i].registered)
2146 break;
2147
Mark A. Greerd20fb182012-12-21 10:04:09 -07002148 for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
Tero Kristo99a7fff2016-09-19 18:22:12 +03002149 struct ahash_alg *alg;
2150
2151 alg = &dd->pdata->algs_info[i].algs_list[j];
2152 alg->export = omap_sham_export;
2153 alg->import = omap_sham_import;
Tero Kristoa84d3512016-09-19 18:22:18 +03002154 alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
2155 BUFLEN;
Tero Kristo99a7fff2016-09-19 18:22:12 +03002156 err = crypto_register_ahash(alg);
Mark A. Greerd20fb182012-12-21 10:04:09 -07002157 if (err)
2158 goto err_algs;
2159
2160 dd->pdata->algs_info[i].registered++;
2161 }
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002162 }
2163
Tero Kristoc9af5992018-02-27 15:30:36 +02002164 err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
2165 if (err) {
2166 dev_err(dev, "could not create sysfs device attrs\n");
2167 goto err_algs;
2168 }
2169
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002170 return 0;
2171
2172err_algs:
Mark A. Greerd20fb182012-12-21 10:04:09 -07002173 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2174 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
2175 crypto_unregister_ahash(
2176 &dd->pdata->algs_info[i].algs_list[j]);
Tero Kristo133c3d42020-09-07 10:56:10 +03002177err_engine_start:
2178 crypto_engine_exit(dd->engine);
2179err_engine:
Ben Hutchingsfe4d5572021-08-11 02:06:09 +02002180 spin_lock_bh(&sham.lock);
Tero Kristo133c3d42020-09-07 10:56:10 +03002181 list_del(&dd->list);
Ben Hutchingsfe4d5572021-08-11 02:06:09 +02002182 spin_unlock_bh(&sham.lock);
Pali Rohár604c3102015-03-08 11:01:01 +01002183err_pm:
Tony Lindgrenf83fc1a2021-07-27 13:23:36 +03002184 pm_runtime_dont_use_autosuspend(dev);
Mark A. Greerb359f032012-12-21 10:04:02 -07002185 pm_runtime_disable(dev);
Dan Carpenterd462e322016-05-18 13:39:05 +03002186 if (!dd->polling_mode)
Mark A. Greerf13ab862013-11-12 13:12:27 -07002187 dma_release_channel(dd->dma_lch);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002188data_err:
2189 dev_err(dev, "initialization failed.\n");
2190
2191 return err;
2192}
2193
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08002194static int omap_sham_remove(struct platform_device *pdev)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002195{
Gustavo A. R. Silva0588d852017-07-18 18:03:11 -05002196 struct omap_sham_dev *dd;
Mark A. Greerd20fb182012-12-21 10:04:09 -07002197 int i, j;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002198
2199 dd = platform_get_drvdata(pdev);
2200 if (!dd)
2201 return -ENODEV;
Ben Hutchingsfe4d5572021-08-11 02:06:09 +02002202 spin_lock_bh(&sham.lock);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002203 list_del(&dd->list);
Ben Hutchingsfe4d5572021-08-11 02:06:09 +02002204 spin_unlock_bh(&sham.lock);
Mark A. Greerd20fb182012-12-21 10:04:09 -07002205 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
Tero Kristo281c3772020-05-27 15:24:29 +03002206 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
Mark A. Greerd20fb182012-12-21 10:04:09 -07002207 crypto_unregister_ahash(
2208 &dd->pdata->algs_info[i].algs_list[j]);
Tero Kristo281c3772020-05-27 15:24:29 +03002209 dd->pdata->algs_info[i].registered--;
2210 }
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002211 tasklet_kill(&dd->done_task);
Tony Lindgrenf83fc1a2021-07-27 13:23:36 +03002212 pm_runtime_dont_use_autosuspend(&pdev->dev);
Mark A. Greerb359f032012-12-21 10:04:02 -07002213 pm_runtime_disable(&pdev->dev);
Mark A. Greerf13ab862013-11-12 13:12:27 -07002214
Peter Ujfalusidbe24622016-04-29 16:03:41 +03002215 if (!dd->polling_mode)
Mark A. Greerf13ab862013-11-12 13:12:27 -07002216 dma_release_channel(dd->dma_lch);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002217
Tero Kristob82fc912019-11-05 16:00:51 +02002218 sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
2219
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002220 return 0;
2221}
2222
2223static struct platform_driver omap_sham_driver = {
2224 .probe = omap_sham_probe,
2225 .remove = omap_sham_remove,
2226 .driver = {
2227 .name = "omap-sham",
Mark A. Greer03feec92012-12-21 10:04:06 -07002228 .of_match_table = omap_sham_of_match,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002229 },
2230};
2231
Sachin Kamat02613702013-03-04 15:09:43 +05302232module_platform_driver(omap_sham_driver);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002233
2234MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
2235MODULE_LICENSE("GPL v2");
2236MODULE_AUTHOR("Dmitry Kasatkin");
Joni Lapilainen718249d2013-10-26 23:00:41 +02002237MODULE_ALIAS("platform:omap-sham");