// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/internal/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   unsigned int len, bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (is_sec1) {
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
		ptr->eptr = upper_32_bits(dma_addr);
	}
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (is_sec1) {
		dst_ptr->len1 = src_ptr->len1;
	} else {
		dst_ptr->len = src_ptr->len;
		dst_ptr->eptr = src_ptr->eptr;
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}

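/*
 * Note: the helpers above capture the two descriptor pointer layouts this
 * driver handles.  SEC1 keeps a 16-bit length in len1 and a plain 32-bit
 * pointer (no eptr, no j_extent), while SEC2+ uses len plus an eptr that
 * holds the upper address bits (36-bit addressing, enabled in
 * reset_channel() below) and a j_extent byte for link-table/extent flags.
 */
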
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void __map_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     unsigned int len, void *data,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}

static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
}

static void map_single_talitos_ptr_nosync(struct device *dev,
					  struct talitos_ptr *ptr,
					  unsigned int len, void *data,
					  enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir,
				 DMA_ATTR_SKIP_CPU_SYNC);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

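/*
 * Request fifo model used by talitos_submit() and flush_channel() below:
 * each channel owns a ring of struct talitos_request indexed by head
 * (producer) and tail (consumer).  Indexes wrap by masking with
 * (fifo_len - 1), which assumes fifo_len is a power of two, and
 * submit_count provides the back-pressure that turns a full h/w fifo
 * into -EAGAIN.
 */
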
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
			  void (*callback)(struct device *dev,
					   struct talitos_desc *desc,
					   void *context, int error),
			  void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}

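/*
 * Illustrative call pattern only (not taken verbatim from a specific
 * caller): a request path maps its descriptor pointers, hands the
 * descriptor to a channel and treats -EINPROGRESS as success:
 *
 *	ret = talitos_submit(dev, ctx->ch, &edesc->desc, callback, areq);
 *	if (ret != -EINPROGRESS) {
 *		... unmap buffers and free edesc ...
 *		return ret;
 *	}
 *
 * -EAGAIN means the channel fifo was full and the caller may retry;
 * completion status is reported through the callback's err argument.
 */
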
static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
{
	struct talitos_edesc *edesc;

	if (!is_sec1)
		return request->desc->hdr;

	if (!request->desc->next_desc)
		return request->desc->hdr1;

	edesc = container_of(request->desc, struct talitos_edesc, desc);

	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
}

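/*
 * When a request uses a chained (second) descriptor, that descriptor
 * lives at edesc->buf + edesc->dma_len and is reachable through
 * desc->next_desc; get_request_hdr() above and current_desc_hdr() below
 * read the header from that location in that case.
 */
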
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = get_request_hdr(request, is_sec1);

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask) \
static void talitos1_done_##name(unsigned long data) \
{ \
	struct device *dev = (struct device *)data; \
	struct talitos_private *priv = dev_get_drvdata(dev); \
	unsigned long flags; \
 \
	if (ch_done_mask & 0x10000000) \
		flush_channel(dev, 0, 0, 0); \
	if (ch_done_mask & 0x40000000) \
		flush_channel(dev, 1, 0, 0); \
	if (ch_done_mask & 0x00010000) \
		flush_channel(dev, 2, 0, 0); \
	if (ch_done_mask & 0x00040000) \
		flush_channel(dev, 3, 0, 0); \
 \
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */ \
	spin_lock_irqsave(&priv->reg_lock, flags); \
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
	spin_unlock_irqrestore(&priv->reg_lock, flags); \
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)

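/*
 * The ISR "done" bit layout differs between the two families: the SEC1
 * masks tested above put channels 0-3 at 0x10000000, 0x40000000,
 * 0x00010000 and 0x00040000, while the SEC2+ variant below uses the
 * evenly spaced bits 0, 2, 4 and 6.
 */
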
#define DEF_TALITOS2_DONE(name, ch_done_mask) \
static void talitos2_done_##name(unsigned long data) \
{ \
	struct device *dev = (struct device *)data; \
	struct talitos_private *priv = dev_get_drvdata(dev); \
	unsigned long flags; \
 \
	if (ch_done_mask & 1) \
		flush_channel(dev, 0, 0, 0); \
	if (ch_done_mask & (1 << 2)) \
		flush_channel(dev, 1, 0, 0); \
	if (ch_done_mask & (1 << 4)) \
		flush_channel(dev, 2, 0, 0); \
	if (ch_done_mask & (1 << 6)) \
		flush_channel(dev, 3, 0, 0); \
 \
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */ \
	spin_lock_irqsave(&priv->reg_lock, flags); \
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
	spin_unlock_irqrestore(&priv->reg_lock, flags); \
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
		struct talitos_edesc *edesc;

		edesc = container_of(priv->chan[ch].fifo[iter].desc,
				     struct talitos_edesc, desc);
		return ((struct talitos_desc *)
			(edesc->buf + edesc->dma_len))->hdr;
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* error bits 29, 31, 17, 19 for ch 0, 1, 2, 3 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
{ \
	struct device *dev = data; \
	struct talitos_private *priv = dev_get_drvdata(dev); \
	u32 isr, isr_lo; \
	unsigned long flags; \
 \
	spin_lock_irqsave(&priv->reg_lock, flags); \
	isr = in_be32(priv->reg + TALITOS_ISR); \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
	/* Acknowledge interrupt */ \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
 \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
		spin_unlock_irqrestore(&priv->reg_lock, flags); \
		talitos_error(dev, isr & ch_err_mask, isr_lo); \
	} \
	else { \
		if (likely(isr & ch_done_mask)) { \
			/* mask further done interrupts. */ \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]); \
		} \
		spin_unlock_irqrestore(&priv->reg_lock, flags); \
	} \
 \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE; \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
{ \
	struct device *dev = data; \
	struct talitos_private *priv = dev_get_drvdata(dev); \
	u32 isr, isr_lo; \
	unsigned long flags; \
 \
	spin_lock_irqsave(&priv->reg_lock, flags); \
	isr = in_be32(priv->reg + TALITOS_ISR); \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
	/* Acknowledge interrupt */ \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
 \
	if (unlikely(isr & ch_err_mask || isr_lo)) { \
		spin_unlock_irqrestore(&priv->reg_lock, flags); \
		talitos_error(dev, isr & ch_err_mask, isr_lo); \
	} \
	else { \
		if (likely(isr & ch_done_mask)) { \
			/* mask further done interrupts. */ \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]); \
		} \
		spin_unlock_irqrestore(&priv->reg_lock, flags); \
	} \
 \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE; \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#else
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
#endif
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[2][HASH_MAX_BLOCK_SIZE];
	int buf_idx;
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

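/*
 * The authenc() key blob handed to these setkey paths is split by
 * crypto_authenc_extractkeys() into an authentication key and an
 * encryption key; both are stored back to back in ctx->key (auth key
 * first) and DMA-mapped once so descriptors can point straight at them.
 */
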
static int aead_des3_setkey(struct crypto_aead *authenc,
			    const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = -EINVAL;
	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto out;

	err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
	if (err)
		goto out;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
			 cryptlen + authsize, areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + cryptlen - ivsize);
	}
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq, true);

	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	if (!err) {
		/* auth check */
		oicv = edesc->buf + edesc->dma_len;
		icv = oicv - authsize;

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

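/*
 * Two decrypt completion paths: the "swauth" callback above checks the
 * ICV in software, comparing the two copies kept at the end of the edesc
 * buffer with crypto_memneq(), while the "hwauth" callback lets the
 * hardware do the check and only inspects the ICCR1 status bits in the
 * returned descriptor header.
 */
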
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int datalen, int elen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = elen ? sg_count + 1 : sg_count;
	int count = 0;
	int cryptlen = datalen + elen;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		if (datalen > 0 && len > datalen) {
			to_talitos_ptr(link_tbl_ptr + count,
				       sg_dma_address(sg) + offset, datalen, 0);
			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
			count++;
			len -= datalen;
			offset += datalen;
		}
		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, len, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		datalen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RET, 0);

	return count;
}

static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen,
			      bool force)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (!src) {
		to_talitos_ptr(ptr, 0, 0, is_sec1);
		return 1;
	}
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
	if (sg_count == 1 && !force) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
					 &edesc->link_tbl[tbl_off]);
	if (sg_count == 1 && !force) {
		/* Only one segment now, so no link tbl needed */
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}

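/*
 * talitos_sg_map_ext() above picks the cheapest pointer encoding that
 * still works: a NULL source becomes a null pointer, a single segment
 * (unless "force") is referenced directly, SEC1 falls back to the
 * driver's contiguous buffer (dma_link_tbl + offset), and everything
 * else gets a link table chain flagged with DESC_PTR_LNKTBL_JUMP.
 */
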
static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0, false);
}

Kim Phillips9c4a7962008-06-23 19:50:15 +08001187/*
1188 * fill in and submit ipsec_esp descriptor
1189 */
Lee Nipper56af8cd2009-03-29 15:50:50 +08001190static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001191 bool encrypt,
Herbert Xuaeb4c132015-07-30 17:53:22 +08001192 void (*callback)(struct device *dev,
1193 struct talitos_desc *desc,
1194 void *context, int error))
Kim Phillips9c4a7962008-06-23 19:50:15 +08001195{
1196 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001197 unsigned int authsize = crypto_aead_authsize(aead);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001198 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1199 struct device *dev = ctx->dev;
1200 struct talitos_desc *desc = &edesc->desc;
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001201 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
Kim Phillipse41256f2009-08-13 11:49:06 +10001202 unsigned int ivsize = crypto_aead_ivsize(aead);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001203 int tbl_off = 0;
Kim Phillipsfa86a262008-07-17 20:20:06 +08001204 int sg_count, ret;
LEROY Christophe2b122732018-03-22 10:57:01 +01001205 int elen = 0;
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001206 bool sync_needed = false;
1207 struct talitos_private *priv = dev_get_drvdata(dev);
1208 bool is_sec1 = has_ftr_sec1(priv);
LEROY Christophe9a655602017-10-06 15:04:59 +02001209 bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1210 struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1211 struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
Christophe Leroye3451772019-05-21 13:34:19 +00001212 dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
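	/*
	 * IPSEC_ESP descriptors take the cipher IV in ptr[2] and the cipher
	 * key in ptr[3]; the HMAC_SNOOP_NO_AFEU type used for the other AEAD
	 * algorithms swaps the two, hence civ_ptr/ckey_ptr above.
	 */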
Kim Phillips9c4a7962008-06-23 19:50:15 +08001213
1214 /* hmac key */
LEROY Christophe2e13ce02017-10-06 15:05:02 +02001215 to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001216
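	/*
	 * Map the source data: on SEC1 a multi-entry scatterlist is first
	 * linearized into the edesc buffer, otherwise the scatterlist is DMA
	 * mapped directly (bidirectionally when src == dst).
	 */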
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001217 sg_count = edesc->src_nents ?: 1;
1218 if (is_sec1 && sg_count > 1)
1219 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1220 areq->assoclen + cryptlen);
1221 else
1222 sg_count = dma_map_sg(dev, areq->src, sg_count,
1223 (areq->src == areq->dst) ?
1224 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1225
Kim Phillips9c4a7962008-06-23 19:50:15 +08001226 /* hmac data */
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001227 ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1228 &desc->ptr[1], sg_count, 0, tbl_off);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001229
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001230 if (ret > 1) {
Horia Geantă340ff602016-04-19 20:33:48 +03001231 tbl_off += ret;
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001232 sync_needed = true;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001233 }
1234
Kim Phillips9c4a7962008-06-23 19:50:15 +08001235 /* cipher iv */
LEROY Christophe9a655602017-10-06 15:04:59 +02001236 to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001237
1238 /* cipher key */
LEROY Christophe2e13ce02017-10-06 15:05:02 +02001239 to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
1240 ctx->enckeylen, is_sec1);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001241
1242 /*
1243 * cipher in
1244 * map the data and adjust the cipher length to the aead request cryptlen.
1245 * the extent (elen) is the number of HMAC bytes appended to the
1246 * ciphertext, typically 12 for ipsec
1247 */
LEROY Christophe2b122732018-03-22 10:57:01 +01001248 if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1249 elen = authsize;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001250
LEROY Christophe2b122732018-03-22 10:57:01 +01001251 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
Christophe Leroye3451772019-05-21 13:34:19 +00001252 sg_count, areq->assoclen, tbl_off, elen,
1253 false);
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001254
LEROY Christopheec8c7d12017-10-06 15:04:33 +02001255 if (ret > 1) {
1256 tbl_off += ret;
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001257 sync_needed = true;
Horia Geantă340ff602016-04-19 20:33:48 +03001258 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08001259
1260 /* cipher out */
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001261 if (areq->src != areq->dst) {
1262 sg_count = edesc->dst_nents ? : 1;
1263 if (!is_sec1 || sg_count == 1)
1264 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1265 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08001266
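	/*
	 * When encrypting an IPSEC_ESP request, the ICV generated by the
	 * hardware follows the ciphertext and is accounted for as extent
	 * bytes; when decrypting, a link table is forced so an extra entry
	 * for the ICV can be chained below.
	 */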
Christophe Leroye3451772019-05-21 13:34:19 +00001267 if (is_ipsec_esp && encrypt)
1268 elen = authsize;
1269 else
1270 elen = 0;
1271 ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1272 sg_count, areq->assoclen, tbl_off, elen,
1273 is_ipsec_esp && !encrypt);
1274 tbl_off += ret;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001275
Christophe Leroye3451772019-05-21 13:34:19 +00001276 if (!encrypt && is_ipsec_esp) {
1277 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1278
1279 /* Add an entry to the link table for ICV data */
1280 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1281 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1282
1283 /* icv data follows link tables */
1284 to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001285 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001286 sync_needed = true;
Christophe Leroye3451772019-05-21 13:34:19 +00001287 } else if (!encrypt) {
1288 to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1289 sync_needed = true;
LEROY Christophe9a655602017-10-06 15:04:59 +02001290 } else if (!is_ipsec_esp) {
Christophe Leroye3451772019-05-21 13:34:19 +00001291 talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1292 sg_count, areq->assoclen + cryptlen, tbl_off);
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001293 }
1294
Kim Phillips9c4a7962008-06-23 19:50:15 +08001295 /* iv out */
LEROY Christophe9a655602017-10-06 15:04:59 +02001296 if (is_ipsec_esp)
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001297 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1298 DMA_FROM_DEVICE);
1299
1300 if (sync_needed)
1301 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1302 edesc->dma_len,
1303 DMA_BIDIRECTIONAL);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001304
Kim Phillips5228f0f2011-07-15 11:21:38 +08001305 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
Kim Phillipsfa86a262008-07-17 20:20:06 +08001306 if (ret != -EINPROGRESS) {
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001307 ipsec_esp_unmap(dev, edesc, areq, encrypt);
Kim Phillipsfa86a262008-07-17 20:20:06 +08001308 kfree(edesc);
1309 }
1310 return ret;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001311}
1312
Kim Phillips9c4a7962008-06-23 19:50:15 +08001313/*
Lee Nipper56af8cd2009-03-29 15:50:50 +08001314 * allocate and map the extended descriptor
Kim Phillips9c4a7962008-06-23 19:50:15 +08001315 */
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001316static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1317 struct scatterlist *src,
1318 struct scatterlist *dst,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001319 u8 *iv,
1320 unsigned int assoclen,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001321 unsigned int cryptlen,
1322 unsigned int authsize,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001323 unsigned int ivsize,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001324 int icv_stashing,
Horia Geanta62293a32013-11-28 15:11:17 +02001325 u32 cryptoflags,
1326 bool encrypt)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001327{
Lee Nipper56af8cd2009-03-29 15:50:50 +08001328 struct talitos_edesc *edesc;
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001329 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001330 dma_addr_t iv_dma = 0;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001331 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
Kim Phillips586725f2008-07-17 20:19:18 +08001332 GFP_ATOMIC;
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001333 struct talitos_private *priv = dev_get_drvdata(dev);
1334 bool is_sec1 = has_ftr_sec1(priv);
1335 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001336
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001337 if (cryptlen + authsize > max_len) {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001338 dev_err(dev, "length exceeds h/w max limit\n");
Kim Phillips9c4a7962008-06-23 19:50:15 +08001339 return ERR_PTR(-EINVAL);
1340 }
1341
Horia Geanta62293a32013-11-28 15:11:17 +02001342 if (!dst || dst == src) {
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001343 src_len = assoclen + cryptlen + authsize;
1344 src_nents = sg_nents_for_len(src, src_len);
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001345 if (src_nents < 0) {
1346 dev_err(dev, "Invalid number of src SG.\n");
Christophe Leroyc56c2e12019-01-08 06:56:46 +00001347 return ERR_PTR(-EINVAL);
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001348 }
Horia Geanta62293a32013-11-28 15:11:17 +02001349 src_nents = (src_nents == 1) ? 0 : src_nents;
1350 dst_nents = dst ? src_nents : 0;
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001351 dst_len = 0;
Horia Geanta62293a32013-11-28 15:11:17 +02001352 } else { /* dst && dst != src */
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001353 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1354 src_nents = sg_nents_for_len(src, src_len);
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001355 if (src_nents < 0) {
1356 dev_err(dev, "Invalid number of src SG.\n");
Christophe Leroyc56c2e12019-01-08 06:56:46 +00001357 return ERR_PTR(-EINVAL);
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001358 }
Horia Geanta62293a32013-11-28 15:11:17 +02001359 src_nents = (src_nents == 1) ? 0 : src_nents;
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001360 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1361 dst_nents = sg_nents_for_len(dst, dst_len);
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001362 if (dst_nents < 0) {
1363 dev_err(dev, "Invalid number of dst SG.\n");
Christophe Leroyc56c2e12019-01-08 06:56:46 +00001364 return ERR_PTR(-EINVAL);
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001365 }
Horia Geanta62293a32013-11-28 15:11:17 +02001366 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001367 }
1368
1369 /*
1370 * allocate space for base edesc plus the link tables,
Herbert Xuaeb4c132015-07-30 17:53:22 +08001371 * allowing for two separate entries for AD and generated ICV (+ 2),
1372 * and space for two sets of ICVs (stashed and generated)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001373 */
Lee Nipper56af8cd2009-03-29 15:50:50 +08001374 alloc_len = sizeof(struct talitos_edesc);
Christophe Leroye3451772019-05-21 13:34:19 +00001375 if (src_nents || dst_nents || !encrypt) {
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001376 if (is_sec1)
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001377 dma_len = (src_nents ? src_len : 0) +
Christophe Leroye3451772019-05-21 13:34:19 +00001378 (dst_nents ? dst_len : 0) + authsize;
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001379 else
Herbert Xuaeb4c132015-07-30 17:53:22 +08001380 dma_len = (src_nents + dst_nents + 2) *
Christophe Leroye3451772019-05-21 13:34:19 +00001381 sizeof(struct talitos_ptr) + authsize;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001382 alloc_len += dma_len;
1383 } else {
1384 dma_len = 0;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001385 }
Christophe Leroye3451772019-05-21 13:34:19 +00001386 alloc_len += icv_stashing ? authsize : 0;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001387
LEROY Christophe37b5e882017-10-06 15:05:06 +02001388 /* if it's an ahash, add space for a second desc next to the first one */
1389 if (is_sec1 && !dst)
1390 alloc_len += sizeof(struct talitos_desc);
Christophe Leroy1bea4452019-01-08 06:56:48 +00001391 alloc_len += ivsize;
LEROY Christophe37b5e882017-10-06 15:05:06 +02001392
Kim Phillips586725f2008-07-17 20:19:18 +08001393 edesc = kmalloc(alloc_len, GFP_DMA | flags);
Christophe Leroyc56c2e12019-01-08 06:56:46 +00001394 if (!edesc)
1395 return ERR_PTR(-ENOMEM);
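	/*
	 * Copy the IV to the tail of the edesc so it can be DMA mapped from
	 * memory owned by the request rather than from the caller's buffer.
	 */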
Christophe Leroy1bea4452019-01-08 06:56:48 +00001396 if (ivsize) {
1397 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
Christophe Leroyc56c2e12019-01-08 06:56:46 +00001398 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
Christophe Leroy1bea4452019-01-08 06:56:48 +00001399 }
LEROY Christophee4a647c2017-10-06 15:04:45 +02001400 memset(&edesc->desc, 0, sizeof(edesc->desc));
Kim Phillips9c4a7962008-06-23 19:50:15 +08001401
1402 edesc->src_nents = src_nents;
1403 edesc->dst_nents = dst_nents;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001404 edesc->iv_dma = iv_dma;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001405 edesc->dma_len = dma_len;
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001406 if (dma_len)
1407 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
Lee Nipper497f2e62010-05-19 19:20:36 +10001408 edesc->dma_len,
1409 DMA_BIDIRECTIONAL);
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001410
Kim Phillips9c4a7962008-06-23 19:50:15 +08001411 return edesc;
1412}
1413
Horia Geanta79fd31d2012-08-02 17:16:40 +03001414static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
Horia Geanta62293a32013-11-28 15:11:17 +02001415 int icv_stashing, bool encrypt)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001416{
1417 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001418 unsigned int authsize = crypto_aead_authsize(authenc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001419 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001420 unsigned int ivsize = crypto_aead_ivsize(authenc);
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001421 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001422
Herbert Xuaeb4c132015-07-30 17:53:22 +08001423 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001424 iv, areq->assoclen, cryptlen,
Herbert Xuaeb4c132015-07-30 17:53:22 +08001425 authsize, ivsize, icv_stashing,
Horia Geanta62293a32013-11-28 15:11:17 +02001426 areq->base.flags, encrypt);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001427}
1428
Lee Nipper56af8cd2009-03-29 15:50:50 +08001429static int aead_encrypt(struct aead_request *req)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001430{
1431 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1432 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Lee Nipper56af8cd2009-03-29 15:50:50 +08001433 struct talitos_edesc *edesc;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001434
1435 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001436 edesc = aead_edesc_alloc(req, req->iv, 0, true);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001437 if (IS_ERR(edesc))
1438 return PTR_ERR(edesc);
1439
1440 /* set encrypt */
Lee Nipper70bcaca2008-07-03 19:08:46 +08001441 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001442
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001443 return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001444}
1445
Lee Nipper56af8cd2009-03-29 15:50:50 +08001446static int aead_decrypt(struct aead_request *req)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001447{
1448 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001449 unsigned int authsize = crypto_aead_authsize(authenc);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001450 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001451 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
Lee Nipper56af8cd2009-03-29 15:50:50 +08001452 struct talitos_edesc *edesc;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001453 void *icvdata;
1454
Kim Phillips9c4a7962008-06-23 19:50:15 +08001455 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001456 edesc = aead_edesc_alloc(req, req->iv, 1, false);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001457 if (IS_ERR(edesc))
1458 return PTR_ERR(edesc);
1459
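	/*
	 * Use the hardware ICV check when this is an IPSEC_ESP descriptor on
	 * a device with hardware auth check and either no link tables are
	 * needed or the device includes the extent in the source link table
	 * length; otherwise the ICV is verified in software below.
	 */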
Christophe Leroy4bbfb832019-05-21 13:34:15 +00001460 if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1461 (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
Kim Phillipse938e462009-03-29 15:53:23 +08001462 ((!edesc->src_nents && !edesc->dst_nents) ||
1463 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
Kim Phillips9c4a7962008-06-23 19:50:15 +08001464
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001465 /* decrypt and check the ICV */
Kim Phillipse938e462009-03-29 15:53:23 +08001466 edesc->desc.hdr = ctx->desc_hdr_template |
1467 DESC_HDR_DIR_INBOUND |
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001468 DESC_HDR_MODE1_MDEU_CICV;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001469
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001470 /* reset integrity check result bits */
Kim Phillips9c4a7962008-06-23 19:50:15 +08001471
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001472 return ipsec_esp(edesc, req, false,
1473 ipsec_esp_decrypt_hwauth_done);
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001474 }
Kim Phillipse938e462009-03-29 15:53:23 +08001475
1476 /* Have to check the ICV with software */
1477 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1478
1479 /* stash incoming ICV for later cmp with ICV generated by the h/w */
Christophe Leroye3451772019-05-21 13:34:19 +00001480 icvdata = edesc->buf + edesc->dma_len;
Kim Phillipse938e462009-03-29 15:53:23 +08001481
Christophe Leroyeae55a52019-05-21 13:34:17 +00001482 sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1483 req->assoclen + req->cryptlen - authsize);
Kim Phillipse938e462009-03-29 15:53:23 +08001484
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001485 return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001486}
1487
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001488static int skcipher_setkey(struct crypto_skcipher *cipher,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001489 const u8 *key, unsigned int keylen)
1490{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001491 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
LEROY Christophe2e13ce02017-10-06 15:05:02 +02001492 struct device *dev = ctx->dev;
LEROY Christophef384cdc2017-10-06 15:04:37 +02001493
LEROY Christophe2e13ce02017-10-06 15:05:02 +02001494 if (ctx->keylen)
1495 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1496
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001497 memcpy(&ctx->key, key, keylen);
1498 ctx->keylen = keylen;
1499
LEROY Christophe2e13ce02017-10-06 15:05:02 +02001500 ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1501
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001502 return 0;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001503}
1504
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001505static int skcipher_des_setkey(struct crypto_skcipher *cipher,
Herbert Xuef7c5c82019-04-11 16:51:21 +08001506 const u8 *key, unsigned int keylen)
1507{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001508 return verify_skcipher_des_key(cipher, key) ?:
1509 skcipher_setkey(cipher, key, keylen);
Herbert Xuef7c5c82019-04-11 16:51:21 +08001510}
1511
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001512static int skcipher_des3_setkey(struct crypto_skcipher *cipher,
Herbert Xuef7c5c82019-04-11 16:51:21 +08001513 const u8 *key, unsigned int keylen)
1514{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001515 return verify_skcipher_des3_key(cipher, key) ?:
1516 skcipher_setkey(cipher, key, keylen);
Herbert Xuef7c5c82019-04-11 16:51:21 +08001517}
1518
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001519static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
Christophe Leroy1ba34e72019-05-21 13:34:10 +00001520 const u8 *key, unsigned int keylen)
1521{
1522 if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1523 keylen == AES_KEYSIZE_256)
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001524 return skcipher_setkey(cipher, key, keylen);
Christophe Leroy1ba34e72019-05-21 13:34:10 +00001525
Christophe Leroy1ba34e72019-05-21 13:34:10 +00001526 return -EINVAL;
1527}
1528
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001529static void common_nonsnoop_unmap(struct device *dev,
1530 struct talitos_edesc *edesc,
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001531 struct skcipher_request *areq)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001532{
1533 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
LEROY Christophe032d1972015-04-17 16:31:51 +02001534
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001535 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001536 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1537
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001538 if (edesc->dma_len)
1539 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1540 DMA_BIDIRECTIONAL);
1541}
1542
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001543static void skcipher_done(struct device *dev,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001544 struct talitos_desc *desc, void *context,
1545 int err)
1546{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001547 struct skcipher_request *areq = context;
1548 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1549 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1550 unsigned int ivsize = crypto_skcipher_ivsize(cipher);
Kim Phillips19bbbc62009-03-29 15:53:59 +08001551 struct talitos_edesc *edesc;
1552
1553 edesc = container_of(desc, struct talitos_edesc, desc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001554
1555 common_nonsnoop_unmap(dev, edesc, areq);
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001556 memcpy(areq->iv, ctx->iv, ivsize);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001557
1558 kfree(edesc);
1559
1560 areq->base.complete(&areq->base, err);
1561}
1562
1563static int common_nonsnoop(struct talitos_edesc *edesc,
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001564 struct skcipher_request *areq,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001565 void (*callback) (struct device *dev,
1566 struct talitos_desc *desc,
1567 void *context, int error))
1568{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001569 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1570 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001571 struct device *dev = ctx->dev;
1572 struct talitos_desc *desc = &edesc->desc;
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001573 unsigned int cryptlen = areq->cryptlen;
1574 unsigned int ivsize = crypto_skcipher_ivsize(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001575 int sg_count, ret;
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001576 bool sync_needed = false;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001577 struct talitos_private *priv = dev_get_drvdata(dev);
1578 bool is_sec1 = has_ftr_sec1(priv);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001579
1580 /* first DWORD empty */
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001581
1582 /* cipher iv */
LEROY Christopheda9de142017-10-06 15:04:57 +02001583 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001584
1585 /* cipher key */
LEROY Christophe2e13ce02017-10-06 15:05:02 +02001586 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001587
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001588 sg_count = edesc->src_nents ?: 1;
1589 if (is_sec1 && sg_count > 1)
1590 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1591 cryptlen);
1592 else
1593 sg_count = dma_map_sg(dev, areq->src, sg_count,
1594 (areq->src == areq->dst) ?
1595 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001596 /*
1597 * cipher in
1598 */
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001599 sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1600 &desc->ptr[3], sg_count, 0, 0);
1601 if (sg_count > 1)
1602 sync_needed = true;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001603
1604 /* cipher out */
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001605 if (areq->src != areq->dst) {
1606 sg_count = edesc->dst_nents ? : 1;
1607 if (!is_sec1 || sg_count == 1)
1608 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1609 }
1610
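	/*
	 * A destination link table, if one is needed, is placed after the
	 * source entries, hence the (src_nents + 1) table offset.
	 */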
1611 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1612 sg_count, 0, (edesc->src_nents + 1));
1613 if (ret > 1)
1614 sync_needed = true;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001615
1616 /* iv out */
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001617 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001618 DMA_FROM_DEVICE);
1619
1620 /* last DWORD empty */
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001621
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001622 if (sync_needed)
1623 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1624 edesc->dma_len, DMA_BIDIRECTIONAL);
1625
Kim Phillips5228f0f2011-07-15 11:21:38 +08001626 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001627 if (ret != -EINPROGRESS) {
1628 common_nonsnoop_unmap(dev, edesc, areq);
1629 kfree(edesc);
1630 }
1631 return ret;
1632}
1633
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001634static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request *
Horia Geanta62293a32013-11-28 15:11:17 +02001635 areq, bool encrypt)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001636{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001637 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1638 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1639 unsigned int ivsize = crypto_skcipher_ivsize(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001640
Herbert Xuaeb4c132015-07-30 17:53:22 +08001641 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001642 areq->iv, 0, areq->cryptlen, 0, ivsize, 0,
Horia Geanta62293a32013-11-28 15:11:17 +02001643 areq->base.flags, encrypt);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001644}
1645
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001646static int skcipher_encrypt(struct skcipher_request *areq)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001647{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001648 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1649 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001650 struct talitos_edesc *edesc;
Christophe Leroyee483d32019-05-21 13:34:12 +00001651 unsigned int blocksize =
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001652 crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
Christophe Leroyee483d32019-05-21 13:34:12 +00001653
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001654 if (!areq->cryptlen)
Christophe Leroyee483d32019-05-21 13:34:12 +00001655 return 0;
1656
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001657 if (areq->cryptlen % blocksize)
Christophe Leroyee483d32019-05-21 13:34:12 +00001658 return -EINVAL;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001659
1660 /* allocate extended descriptor */
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001661 edesc = skcipher_edesc_alloc(areq, true);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001662 if (IS_ERR(edesc))
1663 return PTR_ERR(edesc);
1664
1665 /* set encrypt */
1666 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1667
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001668 return common_nonsnoop(edesc, areq, skcipher_done);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001669}
1670
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001671static int skcipher_decrypt(struct skcipher_request *areq)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001672{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001673 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1674 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001675 struct talitos_edesc *edesc;
Christophe Leroyee483d32019-05-21 13:34:12 +00001676 unsigned int blocksize =
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001677 crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
Christophe Leroyee483d32019-05-21 13:34:12 +00001678
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001679 if (!areq->cryptlen)
Christophe Leroyee483d32019-05-21 13:34:12 +00001680 return 0;
1681
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001682 if (areq->cryptlen % blocksize)
Christophe Leroyee483d32019-05-21 13:34:12 +00001683 return -EINVAL;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001684
1685 /* allocate extended descriptor */
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001686 edesc = skcipher_edesc_alloc(areq, false);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001687 if (IS_ERR(edesc))
1688 return PTR_ERR(edesc);
1689
1690 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1691
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001692 return common_nonsnoop(edesc, areq, skcipher_done);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001693}
1694
Lee Nipper497f2e62010-05-19 19:20:36 +10001695static void common_nonsnoop_hash_unmap(struct device *dev,
1696 struct talitos_edesc *edesc,
1697 struct ahash_request *areq)
1698{
1699 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
Christophe Leroy7a6eda52019-09-10 06:04:14 +00001700 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
LEROY Christophead4cd512018-02-26 17:40:04 +01001701 struct talitos_private *priv = dev_get_drvdata(dev);
1702 bool is_sec1 = has_ftr_sec1(priv);
1703 struct talitos_desc *desc = &edesc->desc;
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001704 struct talitos_desc *desc2 = (struct talitos_desc *)
1705 (edesc->buf + edesc->dma_len);
LEROY Christophead4cd512018-02-26 17:40:04 +01001706
1707 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1708 if (desc->next_desc &&
1709 desc->ptr[5].ptr != desc2->ptr[5].ptr)
1710 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
Christophe Leroy7a6eda52019-09-10 06:04:14 +00001711 if (req_ctx->last)
1712 memcpy(areq->result, req_ctx->hw_context,
1713 crypto_ahash_digestsize(tfm));
Lee Nipper497f2e62010-05-19 19:20:36 +10001714
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001715 if (req_ctx->psrc)
1716 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
LEROY Christophe032d1972015-04-17 16:31:51 +02001717
LEROY Christophead4cd512018-02-26 17:40:04 +01001718 /* When using hashctx-in, must unmap it. */
1719 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1720 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1721 DMA_TO_DEVICE);
1722 else if (desc->next_desc)
1723 unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1724 DMA_TO_DEVICE);
1725
1726 if (is_sec1 && req_ctx->nbuf)
1727 unmap_single_talitos_ptr(dev, &desc->ptr[3],
1728 DMA_TO_DEVICE);
1729
Lee Nipper497f2e62010-05-19 19:20:36 +10001730 if (edesc->dma_len)
1731 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1732 DMA_BIDIRECTIONAL);
1733
LEROY Christophe37b5e882017-10-06 15:05:06 +02001734 if (edesc->desc.next_desc)
1735 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1736 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
Lee Nipper497f2e62010-05-19 19:20:36 +10001737}
1738
1739static void ahash_done(struct device *dev,
1740 struct talitos_desc *desc, void *context,
1741 int err)
1742{
1743 struct ahash_request *areq = context;
1744 struct talitos_edesc *edesc =
1745 container_of(desc, struct talitos_edesc, desc);
1746 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1747
1748 if (!req_ctx->last && req_ctx->to_hash_later) {
1749 /* Position any partial block for next update/final/finup */
LEROY Christophe3c0dd192017-10-06 15:05:08 +02001750 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
Lee Nipper5e833bc2010-06-16 15:29:15 +10001751 req_ctx->nbuf = req_ctx->to_hash_later;
Lee Nipper497f2e62010-05-19 19:20:36 +10001752 }
1753 common_nonsnoop_hash_unmap(dev, edesc, areq);
1754
1755 kfree(edesc);
1756
1757 areq->base.complete(&areq->base, err);
1758}
1759
LEROY Christophe2d029052015-04-17 16:32:18 +02001760/*
1761 * SEC1 doesn't like hashing a 0-sized message, so we do the padding
1762 * ourselves and submit a padded block
1763 */
LEROY Christophe5b2cf262017-10-06 15:04:47 +02001764static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
LEROY Christophe2d029052015-04-17 16:32:18 +02001765 struct talitos_edesc *edesc,
1766 struct talitos_ptr *ptr)
1767{
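	/*
	 * One hash block holding the padding of a zero-length message:
	 * 0x80 followed by zeros (the length field is also zero).
	 */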
1768 static u8 padded_hash[64] = {
1769 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1770 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1771 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1772 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1773 };
1774
1775 pr_err_once("Bug in SEC1, padding ourself\n");
1776 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1777 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1778 (char *)padded_hash, DMA_TO_DEVICE);
1779}
1780
Lee Nipper497f2e62010-05-19 19:20:36 +10001781static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1782 struct ahash_request *areq, unsigned int length,
1783 void (*callback) (struct device *dev,
1784 struct talitos_desc *desc,
1785 void *context, int error))
1786{
1787 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1788 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1789 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1790 struct device *dev = ctx->dev;
1791 struct talitos_desc *desc = &edesc->desc;
LEROY Christophe032d1972015-04-17 16:31:51 +02001792 int ret;
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001793 bool sync_needed = false;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001794 struct talitos_private *priv = dev_get_drvdata(dev);
1795 bool is_sec1 = has_ftr_sec1(priv);
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001796 int sg_count;
Lee Nipper497f2e62010-05-19 19:20:36 +10001797
1798 /* first DWORD empty */
Lee Nipper497f2e62010-05-19 19:20:36 +10001799
Kim Phillips60f208d2010-05-19 19:21:53 +10001800 /* hash context in */
1801 if (!req_ctx->first || req_ctx->swinit) {
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001802 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1803 req_ctx->hw_context_size,
1804 req_ctx->hw_context,
1805 DMA_TO_DEVICE);
Kim Phillips60f208d2010-05-19 19:21:53 +10001806 req_ctx->swinit = 0;
Lee Nipper497f2e62010-05-19 19:20:36 +10001807 }
LEROY Christopheafd62fa2017-09-13 12:44:51 +02001808 /* Indicate next op is not the first. */
1809 req_ctx->first = 0;
Lee Nipper497f2e62010-05-19 19:20:36 +10001810
1811 /* HMAC key */
1812 if (ctx->keylen)
LEROY Christophe2e13ce02017-10-06 15:05:02 +02001813 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1814 is_sec1);
Lee Nipper497f2e62010-05-19 19:20:36 +10001815
LEROY Christophe37b5e882017-10-06 15:05:06 +02001816 if (is_sec1 && req_ctx->nbuf)
1817 length -= req_ctx->nbuf;
1818
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001819 sg_count = edesc->src_nents ?: 1;
1820 if (is_sec1 && sg_count > 1)
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001821 sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001822 else if (length)
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001823 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1824 DMA_TO_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001825 /*
1826 * data in
1827 */
LEROY Christophe37b5e882017-10-06 15:05:06 +02001828 if (is_sec1 && req_ctx->nbuf) {
LEROY Christophead4cd512018-02-26 17:40:04 +01001829 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1830 req_ctx->buf[req_ctx->buf_idx],
1831 DMA_TO_DEVICE);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001832 } else {
1833 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001834 &desc->ptr[3], sg_count, 0, 0);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001835 if (sg_count > 1)
1836 sync_needed = true;
1837 }
Lee Nipper497f2e62010-05-19 19:20:36 +10001838
1839 /* fifth DWORD empty */
Lee Nipper497f2e62010-05-19 19:20:36 +10001840
1841 /* hash/HMAC out -or- hash context out */
1842 if (req_ctx->last)
1843 map_single_talitos_ptr(dev, &desc->ptr[5],
1844 crypto_ahash_digestsize(tfm),
Christophe Leroy7a6eda52019-09-10 06:04:14 +00001845 req_ctx->hw_context, DMA_FROM_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001846 else
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001847 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1848 req_ctx->hw_context_size,
1849 req_ctx->hw_context,
1850 DMA_FROM_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001851
1852 /* last DWORD empty */
Lee Nipper497f2e62010-05-19 19:20:36 +10001853
LEROY Christophe2d029052015-04-17 16:32:18 +02001854 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1855 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1856
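	/*
	 * SEC1 with previously buffered data: chain a second descriptor via
	 * next_desc so the buffered bytes (first descriptor) and the new
	 * scatterlist data (second descriptor) are hashed back to back.
	 */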
LEROY Christophe37b5e882017-10-06 15:05:06 +02001857 if (is_sec1 && req_ctx->nbuf && length) {
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001858 struct talitos_desc *desc2 = (struct talitos_desc *)
1859 (edesc->buf + edesc->dma_len);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001860 dma_addr_t next_desc;
1861
1862 memset(desc2, 0, sizeof(*desc2));
1863 desc2->hdr = desc->hdr;
1864 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1865 desc2->hdr1 = desc2->hdr;
1866 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1867 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1868 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1869
LEROY Christophead4cd512018-02-26 17:40:04 +01001870 if (desc->ptr[1].ptr)
1871 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1872 is_sec1);
1873 else
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001874 map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1875 req_ctx->hw_context_size,
1876 req_ctx->hw_context,
1877 DMA_TO_DEVICE);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001878 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1879 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001880 &desc2->ptr[3], sg_count, 0, 0);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001881 if (sg_count > 1)
1882 sync_needed = true;
1883 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1884 if (req_ctx->last)
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001885 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1886 req_ctx->hw_context_size,
1887 req_ctx->hw_context,
1888 DMA_FROM_DEVICE);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001889
1890 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1891 DMA_BIDIRECTIONAL);
1892 desc->next_desc = cpu_to_be32(next_desc);
1893 }
1894
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001895 if (sync_needed)
1896 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1897 edesc->dma_len, DMA_BIDIRECTIONAL);
1898
Kim Phillips5228f0f2011-07-15 11:21:38 +08001899 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10001900 if (ret != -EINPROGRESS) {
1901 common_nonsnoop_hash_unmap(dev, edesc, areq);
1902 kfree(edesc);
1903 }
1904 return ret;
1905}
1906
1907static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1908 unsigned int nbytes)
1909{
1910 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1911 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1912 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001913 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1914 bool is_sec1 = has_ftr_sec1(priv);
1915
1916 if (is_sec1)
1917 nbytes -= req_ctx->nbuf;
Lee Nipper497f2e62010-05-19 19:20:36 +10001918
Herbert Xuaeb4c132015-07-30 17:53:22 +08001919 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
Horia Geanta62293a32013-11-28 15:11:17 +02001920 nbytes, 0, 0, 0, areq->base.flags, false);
Lee Nipper497f2e62010-05-19 19:20:36 +10001921}
1922
1923static int ahash_init(struct ahash_request *areq)
1924{
1925 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001926 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1927 struct device *dev = ctx->dev;
Lee Nipper497f2e62010-05-19 19:20:36 +10001928 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
LEROY Christophe49f97832017-10-06 15:05:04 +02001929 unsigned int size;
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001930 dma_addr_t dma;
Lee Nipper497f2e62010-05-19 19:20:36 +10001931
1932 /* Initialize the context */
LEROY Christophe3c0dd192017-10-06 15:05:08 +02001933 req_ctx->buf_idx = 0;
Lee Nipper5e833bc2010-06-16 15:29:15 +10001934 req_ctx->nbuf = 0;
Kim Phillips60f208d2010-05-19 19:21:53 +10001935 req_ctx->first = 1; /* first indicates h/w must init its context */
1936 req_ctx->swinit = 0; /* assume h/w init of context */
LEROY Christophe49f97832017-10-06 15:05:04 +02001937 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
Lee Nipper497f2e62010-05-19 19:20:36 +10001938 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1939 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
LEROY Christophe49f97832017-10-06 15:05:04 +02001940 req_ctx->hw_context_size = size;
Lee Nipper497f2e62010-05-19 19:20:36 +10001941
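	/* write back the hw_context buffer so later DMA on it is coherent */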
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001942 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1943 DMA_TO_DEVICE);
1944 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1945
Lee Nipper497f2e62010-05-19 19:20:36 +10001946 return 0;
1947}
1948
Kim Phillips60f208d2010-05-19 19:21:53 +10001949/*
1950 * on h/w without explicit sha224 support, we initialize h/w context
1951 * manually with sha224 constants, and tell it to run sha256.
1952 */
1953static int ahash_init_sha224_swinit(struct ahash_request *areq)
1954{
1955 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1956
Kim Phillipsa7524472010-09-23 15:56:38 +08001957 req_ctx->hw_context[0] = SHA224_H0;
1958 req_ctx->hw_context[1] = SHA224_H1;
1959 req_ctx->hw_context[2] = SHA224_H2;
1960 req_ctx->hw_context[3] = SHA224_H3;
1961 req_ctx->hw_context[4] = SHA224_H4;
1962 req_ctx->hw_context[5] = SHA224_H5;
1963 req_ctx->hw_context[6] = SHA224_H6;
1964 req_ctx->hw_context[7] = SHA224_H7;
Kim Phillips60f208d2010-05-19 19:21:53 +10001965
1966 /* init 64-bit count */
1967 req_ctx->hw_context[8] = 0;
1968 req_ctx->hw_context[9] = 0;
1969
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001970 ahash_init(areq);
1971 req_ctx->swinit = 1; /* prevent h/w from initializing the context with sha256 values */
1972
Kim Phillips60f208d2010-05-19 19:21:53 +10001973 return 0;
1974}
1975
Lee Nipper497f2e62010-05-19 19:20:36 +10001976static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1977{
1978 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1979 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1980 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1981 struct talitos_edesc *edesc;
1982 unsigned int blocksize =
1983 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1984 unsigned int nbytes_to_hash;
1985 unsigned int to_hash_later;
Lee Nipper5e833bc2010-06-16 15:29:15 +10001986 unsigned int nsg;
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001987 int nents;
LEROY Christophe37b5e882017-10-06 15:05:06 +02001988 struct device *dev = ctx->dev;
1989 struct talitos_private *priv = dev_get_drvdata(dev);
1990 bool is_sec1 = has_ftr_sec1(priv);
LEROY Christophe3c0dd192017-10-06 15:05:08 +02001991 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
Lee Nipper497f2e62010-05-19 19:20:36 +10001992
Lee Nipper5e833bc2010-06-16 15:29:15 +10001993 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1994 /* Buffer up to one whole block */
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001995 nents = sg_nents_for_len(areq->src, nbytes);
1996 if (nents < 0) {
1997 dev_err(ctx->dev, "Invalid number of src SG.\n");
1998 return nents;
1999 }
2000 sg_copy_to_buffer(areq->src, nents,
LEROY Christophe3c0dd192017-10-06 15:05:08 +02002001 ctx_buf + req_ctx->nbuf, nbytes);
Lee Nipper5e833bc2010-06-16 15:29:15 +10002002 req_ctx->nbuf += nbytes;
Lee Nipper497f2e62010-05-19 19:20:36 +10002003 return 0;
2004 }
2005
Lee Nipper5e833bc2010-06-16 15:29:15 +10002006 /* At least (blocksize + 1) bytes are available to hash */
2007 nbytes_to_hash = nbytes + req_ctx->nbuf;
2008 to_hash_later = nbytes_to_hash & (blocksize - 1);
2009
2010 if (req_ctx->last)
2011 to_hash_later = 0;
2012 else if (to_hash_later)
2013 /* There is a partial block. Hash the full block(s) now */
2014 nbytes_to_hash -= to_hash_later;
2015 else {
2016 /* Keep one block buffered */
2017 nbytes_to_hash -= blocksize;
2018 to_hash_later = blocksize;
2019 }
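	/*
	 * Example: with a 64-byte block size, 10 buffered bytes and a
	 * 100-byte update, nbytes_to_hash = 110 and to_hash_later = 46,
	 * so 64 bytes are hashed now and 46 bytes are buffered.
	 */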
2020
2021 /* Chain in any previously buffered data */
LEROY Christophe37b5e882017-10-06 15:05:06 +02002022 if (!is_sec1 && req_ctx->nbuf) {
Lee Nipper5e833bc2010-06-16 15:29:15 +10002023 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2024 sg_init_table(req_ctx->bufsl, nsg);
LEROY Christophe3c0dd192017-10-06 15:05:08 +02002025 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
Lee Nipper5e833bc2010-06-16 15:29:15 +10002026 if (nsg > 1)
Dan Williamsc56f6d12015-08-07 18:15:13 +02002027 sg_chain(req_ctx->bufsl, 2, areq->src);
Lee Nipper497f2e62010-05-19 19:20:36 +10002028 req_ctx->psrc = req_ctx->bufsl;
LEROY Christophe37b5e882017-10-06 15:05:06 +02002029 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
Christophe Leroy58cdbc62019-06-24 07:20:16 +00002030 int offset;
2031
LEROY Christophe37b5e882017-10-06 15:05:06 +02002032 if (nbytes_to_hash > blocksize)
2033 offset = blocksize - req_ctx->nbuf;
2034 else
2035 offset = nbytes_to_hash - req_ctx->nbuf;
2036 nents = sg_nents_for_len(areq->src, offset);
2037 if (nents < 0) {
2038 dev_err(ctx->dev, "Invalid number of src SG.\n");
2039 return nents;
2040 }
2041 sg_copy_to_buffer(areq->src, nents,
LEROY Christophe3c0dd192017-10-06 15:05:08 +02002042 ctx_buf + req_ctx->nbuf, offset);
LEROY Christophe37b5e882017-10-06 15:05:06 +02002043 req_ctx->nbuf += offset;
Christophe Leroy58cdbc62019-06-24 07:20:16 +00002044 req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2045 offset);
Lee Nipper5e833bc2010-06-16 15:29:15 +10002046 } else
Lee Nipper497f2e62010-05-19 19:20:36 +10002047 req_ctx->psrc = areq->src;
Lee Nipper497f2e62010-05-19 19:20:36 +10002048
Lee Nipper5e833bc2010-06-16 15:29:15 +10002049 if (to_hash_later) {
LABBE Corentin8e409fe2015-11-04 21:13:34 +01002050 nents = sg_nents_for_len(areq->src, nbytes);
2051 if (nents < 0) {
2052 dev_err(ctx->dev, "Invalid number of src SG.\n");
2053 return nents;
2054 }
Akinobu Mitad0525722013-07-08 16:01:55 -07002055 sg_pcopy_to_buffer(areq->src, nents,
LEROY Christophe3c0dd192017-10-06 15:05:08 +02002056 req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
Lee Nipper5e833bc2010-06-16 15:29:15 +10002057 to_hash_later,
2058 nbytes - to_hash_later);
Lee Nipper497f2e62010-05-19 19:20:36 +10002059 }
Lee Nipper5e833bc2010-06-16 15:29:15 +10002060 req_ctx->to_hash_later = to_hash_later;
Lee Nipper497f2e62010-05-19 19:20:36 +10002061
Lee Nipper5e833bc2010-06-16 15:29:15 +10002062 /* Allocate extended descriptor */
Lee Nipper497f2e62010-05-19 19:20:36 +10002063 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2064 if (IS_ERR(edesc))
2065 return PTR_ERR(edesc);
2066
2067 edesc->desc.hdr = ctx->desc_hdr_template;
2068
2069 /* On last one, request SEC to pad; otherwise continue */
2070 if (req_ctx->last)
2071 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2072 else
2073 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2074
Kim Phillips60f208d2010-05-19 19:21:53 +10002075 /* request SEC to INIT hash. */
2076 if (req_ctx->first && !req_ctx->swinit)
Lee Nipper497f2e62010-05-19 19:20:36 +10002077 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2078
2079 /* When the tfm context has a keylen, it's an HMAC.
2080 * A first or last (i.e. not middle) descriptor must request HMAC.
2081 */
2082 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2083 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2084
Christophe Leroy58cdbc62019-06-24 07:20:16 +00002085 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
Lee Nipper497f2e62010-05-19 19:20:36 +10002086}
2087
2088static int ahash_update(struct ahash_request *areq)
2089{
2090 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2091
2092 req_ctx->last = 0;
2093
2094 return ahash_process_req(areq, areq->nbytes);
2095}
2096
2097static int ahash_final(struct ahash_request *areq)
2098{
2099 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2100
2101 req_ctx->last = 1;
2102
2103 return ahash_process_req(areq, 0);
2104}
2105
2106static int ahash_finup(struct ahash_request *areq)
2107{
2108 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2109
2110 req_ctx->last = 1;
2111
2112 return ahash_process_req(areq, areq->nbytes);
2113}
2114
2115static int ahash_digest(struct ahash_request *areq)
2116{
2117 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
Kim Phillips60f208d2010-05-19 19:21:53 +10002118 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10002119
Kim Phillips60f208d2010-05-19 19:21:53 +10002120 ahash->init(areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10002121 req_ctx->last = 1;
2122
2123 return ahash_process_req(areq, areq->nbytes);
2124}
2125
Horia Geantă3639ca82016-04-21 19:24:55 +03002126static int ahash_export(struct ahash_request *areq, void *out)
2127{
2128 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2129 struct talitos_export_state *export = out;
LEROY Christophe6a4967c2018-02-26 17:40:06 +01002130 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2131 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2132 struct device *dev = ctx->dev;
2133 dma_addr_t dma;
2134
2135 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2136 DMA_FROM_DEVICE);
2137 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
Horia Geantă3639ca82016-04-21 19:24:55 +03002138
2139 memcpy(export->hw_context, req_ctx->hw_context,
2140 req_ctx->hw_context_size);
LEROY Christophe3c0dd192017-10-06 15:05:08 +02002141 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
Horia Geantă3639ca82016-04-21 19:24:55 +03002142 export->swinit = req_ctx->swinit;
2143 export->first = req_ctx->first;
2144 export->last = req_ctx->last;
2145 export->to_hash_later = req_ctx->to_hash_later;
2146 export->nbuf = req_ctx->nbuf;
2147
2148 return 0;
2149}
2150
2151static int ahash_import(struct ahash_request *areq, const void *in)
2152{
2153 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2154 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
LEROY Christophe6a4967c2018-02-26 17:40:06 +01002155 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2156 struct device *dev = ctx->dev;
Horia Geantă3639ca82016-04-21 19:24:55 +03002157 const struct talitos_export_state *export = in;
LEROY Christophe49f97832017-10-06 15:05:04 +02002158 unsigned int size;
LEROY Christophe6a4967c2018-02-26 17:40:06 +01002159 dma_addr_t dma;
Horia Geantă3639ca82016-04-21 19:24:55 +03002160
2161 memset(req_ctx, 0, sizeof(*req_ctx));
LEROY Christophe49f97832017-10-06 15:05:04 +02002162 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
Horia Geantă3639ca82016-04-21 19:24:55 +03002163 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2164 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
LEROY Christophe49f97832017-10-06 15:05:04 +02002165 req_ctx->hw_context_size = size;
LEROY Christophe49f97832017-10-06 15:05:04 +02002166 memcpy(req_ctx->hw_context, export->hw_context, size);
LEROY Christophe3c0dd192017-10-06 15:05:08 +02002167 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
Horia Geantă3639ca82016-04-21 19:24:55 +03002168 req_ctx->swinit = export->swinit;
2169 req_ctx->first = export->first;
2170 req_ctx->last = export->last;
2171 req_ctx->to_hash_later = export->to_hash_later;
2172 req_ctx->nbuf = export->nbuf;
2173
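	/* write back the restored context so the device will see it */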
LEROY Christophe6a4967c2018-02-26 17:40:06 +01002174 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2175 DMA_TO_DEVICE);
2176 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2177
Horia Geantă3639ca82016-04-21 19:24:55 +03002178 return 0;
2179}
2180
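/*
 * Synchronously hash an over-long HMAC key with this same tfm; the digest is
 * then used as the actual key (see ahash_setkey() below).
 */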
Lee Nipper79b3a412011-11-21 16:13:25 +08002181static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2182 u8 *hash)
2183{
2184 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2185
2186 struct scatterlist sg[1];
2187 struct ahash_request *req;
Gilad Ben-Yosseff1c90ac32017-10-18 08:00:49 +01002188 struct crypto_wait wait;
Lee Nipper79b3a412011-11-21 16:13:25 +08002189 int ret;
2190
Gilad Ben-Yosseff1c90ac32017-10-18 08:00:49 +01002191 crypto_init_wait(&wait);
Lee Nipper79b3a412011-11-21 16:13:25 +08002192
2193 req = ahash_request_alloc(tfm, GFP_KERNEL);
2194 if (!req)
2195 return -ENOMEM;
2196
2197 /* Keep tfm keylen == 0 during hash of the long key */
2198 ctx->keylen = 0;
2199 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yosseff1c90ac32017-10-18 08:00:49 +01002200 crypto_req_done, &wait);
Lee Nipper79b3a412011-11-21 16:13:25 +08002201
2202 sg_init_one(&sg[0], key, keylen);
2203
2204 ahash_request_set_crypt(req, sg, hash, keylen);
Gilad Ben-Yosseff1c90ac32017-10-18 08:00:49 +01002205 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2206
Lee Nipper79b3a412011-11-21 16:13:25 +08002207 ahash_request_free(req);
2208
2209 return ret;
2210}
2211
2212static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2213 unsigned int keylen)
2214{
2215 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
LEROY Christophe2e13ce02017-10-06 15:05:02 +02002216 struct device *dev = ctx->dev;
Lee Nipper79b3a412011-11-21 16:13:25 +08002217 unsigned int blocksize =
2218 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2219 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2220 unsigned int keysize = keylen;
2221 u8 hash[SHA512_DIGEST_SIZE];
2222 int ret;
2223
2224 if (keylen <= blocksize)
2225 memcpy(ctx->key, key, keysize);
2226 else {
2227 /* Must get the hash of the long key */
2228 ret = keyhash(tfm, key, keylen, hash);
2229
Eric Biggers674f3682019-12-30 21:19:36 -06002230 if (ret)
Lee Nipper79b3a412011-11-21 16:13:25 +08002231 return -EINVAL;
Lee Nipper79b3a412011-11-21 16:13:25 +08002232
2233 keysize = digestsize;
2234 memcpy(ctx->key, hash, digestsize);
2235 }
2236
LEROY Christophe2e13ce02017-10-06 15:05:02 +02002237 if (ctx->keylen)
2238 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2239
Lee Nipper79b3a412011-11-21 16:13:25 +08002240 ctx->keylen = keysize;
LEROY Christophe2e13ce02017-10-06 15:05:02 +02002241 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
Lee Nipper79b3a412011-11-21 16:13:25 +08002242
2243 return 0;
2244}
2245
2246
Kim Phillips9c4a7962008-06-23 19:50:15 +08002247struct talitos_alg_template {
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002248 u32 type;
LEROY Christopheb0057762016-06-06 13:20:44 +02002249 u32 priority;
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002250 union {
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002251 struct skcipher_alg skcipher;
Lee Nipperacbf7c622010-05-19 19:19:33 +10002252 struct ahash_alg hash;
Herbert Xuaeb4c132015-07-30 17:53:22 +08002253 struct aead_alg aead;
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002254 } alg;
Kim Phillips9c4a7962008-06-23 19:50:15 +08002255 __be32 desc_hdr_template;
2256};
2257
2258static struct talitos_alg_template driver_algs[] = {
Horia Geanta991155b2013-03-20 16:31:38 +02002259 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002260 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002261 .alg.aead = {
2262 .base = {
2263 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2264 .cra_driver_name = "authenc-hmac-sha1-"
2265 "cbc-aes-talitos",
2266 .cra_blocksize = AES_BLOCK_SIZE,
2267 .cra_flags = CRYPTO_ALG_ASYNC,
2268 },
2269 .ivsize = AES_BLOCK_SIZE,
2270 .maxauthsize = SHA1_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002271 },
Kim Phillips9c4a7962008-06-23 19:50:15 +08002272 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2273 DESC_HDR_SEL0_AESU |
2274 DESC_HDR_MODE0_AESU_CBC |
2275 DESC_HDR_SEL1_MDEUA |
2276 DESC_HDR_MODE1_MDEU_INIT |
2277 DESC_HDR_MODE1_MDEU_PAD |
2278 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
Lee Nipper70bcaca2008-07-03 19:08:46 +08002279 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002280 { .type = CRYPTO_ALG_TYPE_AEAD,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002281 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2282 .alg.aead = {
2283 .base = {
2284 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2285 .cra_driver_name = "authenc-hmac-sha1-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002286 "cbc-aes-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002287 .cra_blocksize = AES_BLOCK_SIZE,
2288 .cra_flags = CRYPTO_ALG_ASYNC,
2289 },
2290 .ivsize = AES_BLOCK_SIZE,
2291 .maxauthsize = SHA1_DIGEST_SIZE,
2292 },
2293 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2294 DESC_HDR_SEL0_AESU |
2295 DESC_HDR_MODE0_AESU_CBC |
2296 DESC_HDR_SEL1_MDEUA |
2297 DESC_HDR_MODE1_MDEU_INIT |
2298 DESC_HDR_MODE1_MDEU_PAD |
2299 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2300 },
2301 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002302 .alg.aead = {
2303 .base = {
2304 .cra_name = "authenc(hmac(sha1),"
2305 "cbc(des3_ede))",
2306 .cra_driver_name = "authenc-hmac-sha1-"
2307 "cbc-3des-talitos",
2308 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2309 .cra_flags = CRYPTO_ALG_ASYNC,
2310 },
2311 .ivsize = DES3_EDE_BLOCK_SIZE,
2312 .maxauthsize = SHA1_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002313 .setkey = aead_des3_setkey,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002314 },
Lee Nipper70bcaca2008-07-03 19:08:46 +08002315 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2316 DESC_HDR_SEL0_DEU |
2317 DESC_HDR_MODE0_DEU_CBC |
2318 DESC_HDR_MODE0_DEU_3DES |
2319 DESC_HDR_SEL1_MDEUA |
2320 DESC_HDR_MODE1_MDEU_INIT |
2321 DESC_HDR_MODE1_MDEU_PAD |
2322 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
Lee Nipper3952f172008-07-10 18:29:18 +08002323 },
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002324 { .type = CRYPTO_ALG_TYPE_AEAD,
2325 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2326 .alg.aead = {
2327 .base = {
2328 .cra_name = "authenc(hmac(sha1),"
2329 "cbc(des3_ede))",
2330 .cra_driver_name = "authenc-hmac-sha1-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002331 "cbc-3des-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002332 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2333 .cra_flags = CRYPTO_ALG_ASYNC,
2334 },
2335 .ivsize = DES3_EDE_BLOCK_SIZE,
2336 .maxauthsize = SHA1_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002337 .setkey = aead_des3_setkey,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002338 },
2339 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2340 DESC_HDR_SEL0_DEU |
2341 DESC_HDR_MODE0_DEU_CBC |
2342 DESC_HDR_MODE0_DEU_3DES |
2343 DESC_HDR_SEL1_MDEUA |
2344 DESC_HDR_MODE1_MDEU_INIT |
2345 DESC_HDR_MODE1_MDEU_PAD |
2346 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2347 },
Horia Geanta357fb602012-07-03 19:16:53 +03002348 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002349 .alg.aead = {
2350 .base = {
2351 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2352 .cra_driver_name = "authenc-hmac-sha224-"
2353 "cbc-aes-talitos",
2354 .cra_blocksize = AES_BLOCK_SIZE,
2355 .cra_flags = CRYPTO_ALG_ASYNC,
2356 },
2357 .ivsize = AES_BLOCK_SIZE,
2358 .maxauthsize = SHA224_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002359 },
2360 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2361 DESC_HDR_SEL0_AESU |
2362 DESC_HDR_MODE0_AESU_CBC |
2363 DESC_HDR_SEL1_MDEUA |
2364 DESC_HDR_MODE1_MDEU_INIT |
2365 DESC_HDR_MODE1_MDEU_PAD |
2366 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2367 },
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002368 { .type = CRYPTO_ALG_TYPE_AEAD,
2369 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2370 .alg.aead = {
2371 .base = {
2372 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2373 .cra_driver_name = "authenc-hmac-sha224-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002374 "cbc-aes-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002375 .cra_blocksize = AES_BLOCK_SIZE,
2376 .cra_flags = CRYPTO_ALG_ASYNC,
2377 },
2378 .ivsize = AES_BLOCK_SIZE,
2379 .maxauthsize = SHA224_DIGEST_SIZE,
2380 },
2381 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2382 DESC_HDR_SEL0_AESU |
2383 DESC_HDR_MODE0_AESU_CBC |
2384 DESC_HDR_SEL1_MDEUA |
2385 DESC_HDR_MODE1_MDEU_INIT |
2386 DESC_HDR_MODE1_MDEU_PAD |
2387 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2388 },
Horia Geanta357fb602012-07-03 19:16:53 +03002389 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002390 .alg.aead = {
2391 .base = {
2392 .cra_name = "authenc(hmac(sha224),"
2393 "cbc(des3_ede))",
2394 .cra_driver_name = "authenc-hmac-sha224-"
2395 "cbc-3des-talitos",
2396 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2397 .cra_flags = CRYPTO_ALG_ASYNC,
2398 },
2399 .ivsize = DES3_EDE_BLOCK_SIZE,
2400 .maxauthsize = SHA224_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002401 .setkey = aead_des3_setkey,
Horia Geanta357fb602012-07-03 19:16:53 +03002402 },
2403 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2404 DESC_HDR_SEL0_DEU |
2405 DESC_HDR_MODE0_DEU_CBC |
2406 DESC_HDR_MODE0_DEU_3DES |
2407 DESC_HDR_SEL1_MDEUA |
2408 DESC_HDR_MODE1_MDEU_INIT |
2409 DESC_HDR_MODE1_MDEU_PAD |
2410 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2411 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002412 { .type = CRYPTO_ALG_TYPE_AEAD,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002413 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2414 .alg.aead = {
2415 .base = {
2416 .cra_name = "authenc(hmac(sha224),"
2417 "cbc(des3_ede))",
2418 .cra_driver_name = "authenc-hmac-sha224-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002419 "cbc-3des-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002420 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2421 .cra_flags = CRYPTO_ALG_ASYNC,
2422 },
2423 .ivsize = DES3_EDE_BLOCK_SIZE,
2424 .maxauthsize = SHA224_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002425 .setkey = aead_des3_setkey,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002426 },
2427 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2428 DESC_HDR_SEL0_DEU |
2429 DESC_HDR_MODE0_DEU_CBC |
2430 DESC_HDR_MODE0_DEU_3DES |
2431 DESC_HDR_SEL1_MDEUA |
2432 DESC_HDR_MODE1_MDEU_INIT |
2433 DESC_HDR_MODE1_MDEU_PAD |
2434 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2435 },
2436 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002437 .alg.aead = {
2438 .base = {
2439 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2440 .cra_driver_name = "authenc-hmac-sha256-"
2441 "cbc-aes-talitos",
2442 .cra_blocksize = AES_BLOCK_SIZE,
2443 .cra_flags = CRYPTO_ALG_ASYNC,
2444 },
2445 .ivsize = AES_BLOCK_SIZE,
2446 .maxauthsize = SHA256_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002447 },
Lee Nipper3952f172008-07-10 18:29:18 +08002448 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2449 DESC_HDR_SEL0_AESU |
2450 DESC_HDR_MODE0_AESU_CBC |
2451 DESC_HDR_SEL1_MDEUA |
2452 DESC_HDR_MODE1_MDEU_INIT |
2453 DESC_HDR_MODE1_MDEU_PAD |
2454 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2455 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002456 { .type = CRYPTO_ALG_TYPE_AEAD,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002457 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2458 .alg.aead = {
2459 .base = {
2460 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2461 .cra_driver_name = "authenc-hmac-sha256-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002462 "cbc-aes-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002463 .cra_blocksize = AES_BLOCK_SIZE,
2464 .cra_flags = CRYPTO_ALG_ASYNC,
2465 },
2466 .ivsize = AES_BLOCK_SIZE,
2467 .maxauthsize = SHA256_DIGEST_SIZE,
2468 },
2469 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2470 DESC_HDR_SEL0_AESU |
2471 DESC_HDR_MODE0_AESU_CBC |
2472 DESC_HDR_SEL1_MDEUA |
2473 DESC_HDR_MODE1_MDEU_INIT |
2474 DESC_HDR_MODE1_MDEU_PAD |
2475 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2476 },
2477 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002478 .alg.aead = {
2479 .base = {
2480 .cra_name = "authenc(hmac(sha256),"
2481 "cbc(des3_ede))",
2482 .cra_driver_name = "authenc-hmac-sha256-"
2483 "cbc-3des-talitos",
2484 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2485 .cra_flags = CRYPTO_ALG_ASYNC,
2486 },
2487 .ivsize = DES3_EDE_BLOCK_SIZE,
2488 .maxauthsize = SHA256_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002489 .setkey = aead_des3_setkey,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002490 },
Lee Nipper3952f172008-07-10 18:29:18 +08002491 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2492 DESC_HDR_SEL0_DEU |
2493 DESC_HDR_MODE0_DEU_CBC |
2494 DESC_HDR_MODE0_DEU_3DES |
2495 DESC_HDR_SEL1_MDEUA |
2496 DESC_HDR_MODE1_MDEU_INIT |
2497 DESC_HDR_MODE1_MDEU_PAD |
2498 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2499 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002500 { .type = CRYPTO_ALG_TYPE_AEAD,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002501 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2502 .alg.aead = {
2503 .base = {
2504 .cra_name = "authenc(hmac(sha256),"
2505 "cbc(des3_ede))",
2506 .cra_driver_name = "authenc-hmac-sha256-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002507 "cbc-3des-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002508 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2509 .cra_flags = CRYPTO_ALG_ASYNC,
2510 },
2511 .ivsize = DES3_EDE_BLOCK_SIZE,
2512 .maxauthsize = SHA256_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002513 .setkey = aead_des3_setkey,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002514 },
2515 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2516 DESC_HDR_SEL0_DEU |
2517 DESC_HDR_MODE0_DEU_CBC |
2518 DESC_HDR_MODE0_DEU_3DES |
2519 DESC_HDR_SEL1_MDEUA |
2520 DESC_HDR_MODE1_MDEU_INIT |
2521 DESC_HDR_MODE1_MDEU_PAD |
2522 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2523 },
2524 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002525 .alg.aead = {
2526 .base = {
2527 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2528 .cra_driver_name = "authenc-hmac-sha384-"
2529 "cbc-aes-talitos",
2530 .cra_blocksize = AES_BLOCK_SIZE,
2531 .cra_flags = CRYPTO_ALG_ASYNC,
2532 },
2533 .ivsize = AES_BLOCK_SIZE,
2534 .maxauthsize = SHA384_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002535 },
2536 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2537 DESC_HDR_SEL0_AESU |
2538 DESC_HDR_MODE0_AESU_CBC |
2539 DESC_HDR_SEL1_MDEUB |
2540 DESC_HDR_MODE1_MDEU_INIT |
2541 DESC_HDR_MODE1_MDEU_PAD |
2542 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2543 },
2544 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002545 .alg.aead = {
2546 .base = {
2547 .cra_name = "authenc(hmac(sha384),"
2548 "cbc(des3_ede))",
2549 .cra_driver_name = "authenc-hmac-sha384-"
2550 "cbc-3des-talitos",
2551 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2552 .cra_flags = CRYPTO_ALG_ASYNC,
2553 },
2554 .ivsize = DES3_EDE_BLOCK_SIZE,
2555 .maxauthsize = SHA384_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002556 .setkey = aead_des3_setkey,
Horia Geanta357fb602012-07-03 19:16:53 +03002557 },
2558 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2559 DESC_HDR_SEL0_DEU |
2560 DESC_HDR_MODE0_DEU_CBC |
2561 DESC_HDR_MODE0_DEU_3DES |
2562 DESC_HDR_SEL1_MDEUB |
2563 DESC_HDR_MODE1_MDEU_INIT |
2564 DESC_HDR_MODE1_MDEU_PAD |
2565 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2566 },
2567 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002568 .alg.aead = {
2569 .base = {
2570 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2571 .cra_driver_name = "authenc-hmac-sha512-"
2572 "cbc-aes-talitos",
2573 .cra_blocksize = AES_BLOCK_SIZE,
2574 .cra_flags = CRYPTO_ALG_ASYNC,
2575 },
2576 .ivsize = AES_BLOCK_SIZE,
2577 .maxauthsize = SHA512_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002578 },
2579 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2580 DESC_HDR_SEL0_AESU |
2581 DESC_HDR_MODE0_AESU_CBC |
2582 DESC_HDR_SEL1_MDEUB |
2583 DESC_HDR_MODE1_MDEU_INIT |
2584 DESC_HDR_MODE1_MDEU_PAD |
2585 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2586 },
2587 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002588 .alg.aead = {
2589 .base = {
2590 .cra_name = "authenc(hmac(sha512),"
2591 "cbc(des3_ede))",
2592 .cra_driver_name = "authenc-hmac-sha512-"
2593 "cbc-3des-talitos",
2594 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2595 .cra_flags = CRYPTO_ALG_ASYNC,
2596 },
2597 .ivsize = DES3_EDE_BLOCK_SIZE,
2598 .maxauthsize = SHA512_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002599 .setkey = aead_des3_setkey,
Horia Geanta357fb602012-07-03 19:16:53 +03002600 },
2601 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2602 DESC_HDR_SEL0_DEU |
2603 DESC_HDR_MODE0_DEU_CBC |
2604 DESC_HDR_MODE0_DEU_3DES |
2605 DESC_HDR_SEL1_MDEUB |
2606 DESC_HDR_MODE1_MDEU_INIT |
2607 DESC_HDR_MODE1_MDEU_PAD |
2608 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2609 },
2610 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002611 .alg.aead = {
2612 .base = {
2613 .cra_name = "authenc(hmac(md5),cbc(aes))",
2614 .cra_driver_name = "authenc-hmac-md5-"
2615 "cbc-aes-talitos",
2616 .cra_blocksize = AES_BLOCK_SIZE,
2617 .cra_flags = CRYPTO_ALG_ASYNC,
2618 },
2619 .ivsize = AES_BLOCK_SIZE,
2620 .maxauthsize = MD5_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002621 },
Lee Nipper3952f172008-07-10 18:29:18 +08002622 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2623 DESC_HDR_SEL0_AESU |
2624 DESC_HDR_MODE0_AESU_CBC |
2625 DESC_HDR_SEL1_MDEUA |
2626 DESC_HDR_MODE1_MDEU_INIT |
2627 DESC_HDR_MODE1_MDEU_PAD |
2628 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2629 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002630 { .type = CRYPTO_ALG_TYPE_AEAD,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002631 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2632 .alg.aead = {
2633 .base = {
2634 .cra_name = "authenc(hmac(md5),cbc(aes))",
2635 .cra_driver_name = "authenc-hmac-md5-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002636 "cbc-aes-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002637 .cra_blocksize = AES_BLOCK_SIZE,
2638 .cra_flags = CRYPTO_ALG_ASYNC,
2639 },
2640 .ivsize = AES_BLOCK_SIZE,
2641 .maxauthsize = MD5_DIGEST_SIZE,
2642 },
2643 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2644 DESC_HDR_SEL0_AESU |
2645 DESC_HDR_MODE0_AESU_CBC |
2646 DESC_HDR_SEL1_MDEUA |
2647 DESC_HDR_MODE1_MDEU_INIT |
2648 DESC_HDR_MODE1_MDEU_PAD |
2649 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2650 },
2651 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002652 .alg.aead = {
2653 .base = {
2654 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2655 .cra_driver_name = "authenc-hmac-md5-"
2656 "cbc-3des-talitos",
2657 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2658 .cra_flags = CRYPTO_ALG_ASYNC,
2659 },
2660 .ivsize = DES3_EDE_BLOCK_SIZE,
2661 .maxauthsize = MD5_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002662 .setkey = aead_des3_setkey,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002663 },
Lee Nipper3952f172008-07-10 18:29:18 +08002664 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2665 DESC_HDR_SEL0_DEU |
2666 DESC_HDR_MODE0_DEU_CBC |
2667 DESC_HDR_MODE0_DEU_3DES |
2668 DESC_HDR_SEL1_MDEUA |
2669 DESC_HDR_MODE1_MDEU_INIT |
2670 DESC_HDR_MODE1_MDEU_PAD |
2671 DESC_HDR_MODE1_MDEU_MD5_HMAC,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002672 },
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002673 { .type = CRYPTO_ALG_TYPE_AEAD,
2674 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2675 .alg.aead = {
2676 .base = {
2677 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2678 .cra_driver_name = "authenc-hmac-md5-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002679 "cbc-3des-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002680 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2681 .cra_flags = CRYPTO_ALG_ASYNC,
2682 },
2683 .ivsize = DES3_EDE_BLOCK_SIZE,
2684 .maxauthsize = MD5_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002685 .setkey = aead_des3_setkey,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002686 },
2687 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2688 DESC_HDR_SEL0_DEU |
2689 DESC_HDR_MODE0_DEU_CBC |
2690 DESC_HDR_MODE0_DEU_3DES |
2691 DESC_HDR_SEL1_MDEUA |
2692 DESC_HDR_MODE1_MDEU_INIT |
2693 DESC_HDR_MODE1_MDEU_PAD |
2694 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2695 },
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002696 /* SKCIPHER algorithms. */
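	/*
	 * Unauthenticated cipher modes.  Each template only selects the
	 * execution unit and mode bits, e.g. cbc(aes) below combines
	 * DESC_HDR_SEL0_AESU and DESC_HDR_MODE0_AESU_CBC on a common
	 * non-snooping descriptor.
	 */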
2697 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2698 .alg.skcipher = {
2699 .base.cra_name = "ecb(aes)",
2700 .base.cra_driver_name = "ecb-aes-talitos",
2701 .base.cra_blocksize = AES_BLOCK_SIZE,
2702 .base.cra_flags = CRYPTO_ALG_ASYNC,
2703 .min_keysize = AES_MIN_KEY_SIZE,
2704 .max_keysize = AES_MAX_KEY_SIZE,
2705 .setkey = skcipher_aes_setkey,
LEROY Christophe5e75ae12015-12-01 12:44:15 +01002706 },
2707 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2708 DESC_HDR_SEL0_AESU,
2709 },
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002710 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2711 .alg.skcipher = {
2712 .base.cra_name = "cbc(aes)",
2713 .base.cra_driver_name = "cbc-aes-talitos",
2714 .base.cra_blocksize = AES_BLOCK_SIZE,
2715 .base.cra_flags = CRYPTO_ALG_ASYNC,
2716 .min_keysize = AES_MIN_KEY_SIZE,
2717 .max_keysize = AES_MAX_KEY_SIZE,
2718 .ivsize = AES_BLOCK_SIZE,
2719 .setkey = skcipher_aes_setkey,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002720 },
2721 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2722 DESC_HDR_SEL0_AESU |
2723 DESC_HDR_MODE0_AESU_CBC,
2724 },
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002725 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2726 .alg.skcipher = {
2727 .base.cra_name = "ctr(aes)",
2728 .base.cra_driver_name = "ctr-aes-talitos",
2729 .base.cra_blocksize = 1,
2730 .base.cra_flags = CRYPTO_ALG_ASYNC,
2731 .min_keysize = AES_MIN_KEY_SIZE,
2732 .max_keysize = AES_MAX_KEY_SIZE,
2733 .ivsize = AES_BLOCK_SIZE,
2734 .setkey = skcipher_aes_setkey,
LEROY Christophe5e75ae12015-12-01 12:44:15 +01002735 },
LEROY Christophe70d355c2017-10-06 15:04:43 +02002736 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
LEROY Christophe5e75ae12015-12-01 12:44:15 +01002737 DESC_HDR_SEL0_AESU |
2738 DESC_HDR_MODE0_AESU_CTR,
2739 },
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002740 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2741 .alg.skcipher = {
2742 .base.cra_name = "ecb(des)",
2743 .base.cra_driver_name = "ecb-des-talitos",
2744 .base.cra_blocksize = DES_BLOCK_SIZE,
2745 .base.cra_flags = CRYPTO_ALG_ASYNC,
2746 .min_keysize = DES_KEY_SIZE,
2747 .max_keysize = DES_KEY_SIZE,
2748 .setkey = skcipher_des_setkey,
LEROY Christophe5e75ae12015-12-01 12:44:15 +01002749 },
2750 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2751 DESC_HDR_SEL0_DEU,
2752 },
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002753 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2754 .alg.skcipher = {
2755 .base.cra_name = "cbc(des)",
2756 .base.cra_driver_name = "cbc-des-talitos",
2757 .base.cra_blocksize = DES_BLOCK_SIZE,
2758 .base.cra_flags = CRYPTO_ALG_ASYNC,
2759 .min_keysize = DES_KEY_SIZE,
2760 .max_keysize = DES_KEY_SIZE,
2761 .ivsize = DES_BLOCK_SIZE,
2762 .setkey = skcipher_des_setkey,
LEROY Christophe5e75ae12015-12-01 12:44:15 +01002763 },
2764 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2765 DESC_HDR_SEL0_DEU |
2766 DESC_HDR_MODE0_DEU_CBC,
2767 },
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002768 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2769 .alg.skcipher = {
2770 .base.cra_name = "ecb(des3_ede)",
2771 .base.cra_driver_name = "ecb-3des-talitos",
2772 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2773 .base.cra_flags = CRYPTO_ALG_ASYNC,
2774 .min_keysize = DES3_EDE_KEY_SIZE,
2775 .max_keysize = DES3_EDE_KEY_SIZE,
2776 .setkey = skcipher_des3_setkey,
LEROY Christophe5e75ae12015-12-01 12:44:15 +01002777 },
2778 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2779 DESC_HDR_SEL0_DEU |
2780 DESC_HDR_MODE0_DEU_3DES,
2781 },
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002782 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2783 .alg.skcipher = {
2784 .base.cra_name = "cbc(des3_ede)",
2785 .base.cra_driver_name = "cbc-3des-talitos",
2786 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2787 .base.cra_flags = CRYPTO_ALG_ASYNC,
2788 .min_keysize = DES3_EDE_KEY_SIZE,
2789 .max_keysize = DES3_EDE_KEY_SIZE,
2790 .ivsize = DES3_EDE_BLOCK_SIZE,
2791 .setkey = skcipher_des3_setkey,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002792 },
2793 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2794 DESC_HDR_SEL0_DEU |
2795 DESC_HDR_MODE0_DEU_CBC |
2796 DESC_HDR_MODE0_DEU_3DES,
Lee Nipper497f2e62010-05-19 19:20:36 +10002797 },
2798 /* AHASH algorithms. */
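	/*
	 * Plain and hmac() digests.  MD5/SHA-1/SHA-224/SHA-256 run on MDEU-A,
	 * SHA-384/SHA-512 on MDEU-B; all export/import their state through
	 * struct talitos_export_state.
	 */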
2799 { .type = CRYPTO_ALG_TYPE_AHASH,
2800 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002801 .halg.digestsize = MD5_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002802		.halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper497f2e62010-05-19 19:20:36 +10002803 .halg.base = {
2804 .cra_name = "md5",
2805 .cra_driver_name = "md5-talitos",
Martin Hicksb3988612015-03-03 08:21:34 -05002806 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
Eric Biggers6a38f622018-06-30 15:16:12 -07002807 .cra_flags = CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002808 }
2809 },
2810 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2811 DESC_HDR_SEL0_MDEUA |
2812 DESC_HDR_MODE0_MDEU_MD5,
2813 },
2814 { .type = CRYPTO_ALG_TYPE_AHASH,
2815 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002816 .halg.digestsize = SHA1_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002817		.halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper497f2e62010-05-19 19:20:36 +10002818 .halg.base = {
2819 .cra_name = "sha1",
2820 .cra_driver_name = "sha1-talitos",
2821 .cra_blocksize = SHA1_BLOCK_SIZE,
Eric Biggers6a38f622018-06-30 15:16:12 -07002822 .cra_flags = CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002823 }
2824 },
2825 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2826 DESC_HDR_SEL0_MDEUA |
2827 DESC_HDR_MODE0_MDEU_SHA1,
2828 },
2829 { .type = CRYPTO_ALG_TYPE_AHASH,
2830 .alg.hash = {
Kim Phillips60f208d2010-05-19 19:21:53 +10002831 .halg.digestsize = SHA224_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002832		.halg.statesize = sizeof(struct talitos_export_state),
Kim Phillips60f208d2010-05-19 19:21:53 +10002833 .halg.base = {
2834 .cra_name = "sha224",
2835 .cra_driver_name = "sha224-talitos",
2836 .cra_blocksize = SHA224_BLOCK_SIZE,
Eric Biggers6a38f622018-06-30 15:16:12 -07002837 .cra_flags = CRYPTO_ALG_ASYNC,
Kim Phillips60f208d2010-05-19 19:21:53 +10002838 }
2839 },
2840 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2841 DESC_HDR_SEL0_MDEUA |
2842 DESC_HDR_MODE0_MDEU_SHA224,
2843 },
2844 { .type = CRYPTO_ALG_TYPE_AHASH,
2845 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002846 .halg.digestsize = SHA256_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002847		.halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper497f2e62010-05-19 19:20:36 +10002848 .halg.base = {
2849 .cra_name = "sha256",
2850 .cra_driver_name = "sha256-talitos",
2851 .cra_blocksize = SHA256_BLOCK_SIZE,
Eric Biggers6a38f622018-06-30 15:16:12 -07002852 .cra_flags = CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002853 }
2854 },
2855 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2856 DESC_HDR_SEL0_MDEUA |
2857 DESC_HDR_MODE0_MDEU_SHA256,
2858 },
2859 { .type = CRYPTO_ALG_TYPE_AHASH,
2860 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002861 .halg.digestsize = SHA384_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002862		.halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper497f2e62010-05-19 19:20:36 +10002863 .halg.base = {
2864 .cra_name = "sha384",
2865 .cra_driver_name = "sha384-talitos",
2866 .cra_blocksize = SHA384_BLOCK_SIZE,
Eric Biggers6a38f622018-06-30 15:16:12 -07002867 .cra_flags = CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002868 }
2869 },
2870 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2871 DESC_HDR_SEL0_MDEUB |
2872 DESC_HDR_MODE0_MDEUB_SHA384,
2873 },
2874 { .type = CRYPTO_ALG_TYPE_AHASH,
2875 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002876 .halg.digestsize = SHA512_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002877		.halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper497f2e62010-05-19 19:20:36 +10002878 .halg.base = {
2879 .cra_name = "sha512",
2880 .cra_driver_name = "sha512-talitos",
2881 .cra_blocksize = SHA512_BLOCK_SIZE,
Eric Biggers6a38f622018-06-30 15:16:12 -07002882 .cra_flags = CRYPTO_ALG_ASYNC,
Lee Nipper497f2e62010-05-19 19:20:36 +10002883 }
2884 },
2885 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2886 DESC_HDR_SEL0_MDEUB |
2887 DESC_HDR_MODE0_MDEUB_SHA512,
2888 },
Lee Nipper79b3a412011-11-21 16:13:25 +08002889 { .type = CRYPTO_ALG_TYPE_AHASH,
2890 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002891 .halg.digestsize = MD5_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002892		.halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper79b3a412011-11-21 16:13:25 +08002893 .halg.base = {
2894 .cra_name = "hmac(md5)",
2895 .cra_driver_name = "hmac-md5-talitos",
Martin Hicksb3988612015-03-03 08:21:34 -05002896 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
Eric Biggers6a38f622018-06-30 15:16:12 -07002897 .cra_flags = CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002898 }
2899 },
2900 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2901 DESC_HDR_SEL0_MDEUA |
2902 DESC_HDR_MODE0_MDEU_MD5,
2903 },
2904 { .type = CRYPTO_ALG_TYPE_AHASH,
2905 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002906 .halg.digestsize = SHA1_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002907		.halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper79b3a412011-11-21 16:13:25 +08002908 .halg.base = {
2909 .cra_name = "hmac(sha1)",
2910 .cra_driver_name = "hmac-sha1-talitos",
2911 .cra_blocksize = SHA1_BLOCK_SIZE,
Eric Biggers6a38f622018-06-30 15:16:12 -07002912 .cra_flags = CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002913 }
2914 },
2915 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2916 DESC_HDR_SEL0_MDEUA |
2917 DESC_HDR_MODE0_MDEU_SHA1,
2918 },
2919 { .type = CRYPTO_ALG_TYPE_AHASH,
2920 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002921 .halg.digestsize = SHA224_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002922		.halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper79b3a412011-11-21 16:13:25 +08002923 .halg.base = {
2924 .cra_name = "hmac(sha224)",
2925 .cra_driver_name = "hmac-sha224-talitos",
2926 .cra_blocksize = SHA224_BLOCK_SIZE,
Eric Biggers6a38f622018-06-30 15:16:12 -07002927 .cra_flags = CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002928 }
2929 },
2930 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2931 DESC_HDR_SEL0_MDEUA |
2932 DESC_HDR_MODE0_MDEU_SHA224,
2933 },
2934 { .type = CRYPTO_ALG_TYPE_AHASH,
2935 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002936 .halg.digestsize = SHA256_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002937		.halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper79b3a412011-11-21 16:13:25 +08002938 .halg.base = {
2939 .cra_name = "hmac(sha256)",
2940 .cra_driver_name = "hmac-sha256-talitos",
2941 .cra_blocksize = SHA256_BLOCK_SIZE,
Eric Biggers6a38f622018-06-30 15:16:12 -07002942 .cra_flags = CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002943 }
2944 },
2945 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2946 DESC_HDR_SEL0_MDEUA |
2947 DESC_HDR_MODE0_MDEU_SHA256,
2948 },
2949 { .type = CRYPTO_ALG_TYPE_AHASH,
2950 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002951 .halg.digestsize = SHA384_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002952		.halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper79b3a412011-11-21 16:13:25 +08002953 .halg.base = {
2954 .cra_name = "hmac(sha384)",
2955 .cra_driver_name = "hmac-sha384-talitos",
2956 .cra_blocksize = SHA384_BLOCK_SIZE,
Eric Biggers6a38f622018-06-30 15:16:12 -07002957 .cra_flags = CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002958 }
2959 },
2960 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2961 DESC_HDR_SEL0_MDEUB |
2962 DESC_HDR_MODE0_MDEUB_SHA384,
2963 },
2964 { .type = CRYPTO_ALG_TYPE_AHASH,
2965 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002966 .halg.digestsize = SHA512_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002967		.halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper79b3a412011-11-21 16:13:25 +08002968 .halg.base = {
2969 .cra_name = "hmac(sha512)",
2970 .cra_driver_name = "hmac-sha512-talitos",
2971 .cra_blocksize = SHA512_BLOCK_SIZE,
Eric Biggers6a38f622018-06-30 15:16:12 -07002972 .cra_flags = CRYPTO_ALG_ASYNC,
Lee Nipper79b3a412011-11-21 16:13:25 +08002973 }
2974 },
2975 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2976 DESC_HDR_SEL0_MDEUB |
2977 DESC_HDR_MODE0_MDEUB_SHA512,
2978 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08002979};
2980
2981struct talitos_crypto_alg {
2982 struct list_head entry;
2983 struct device *dev;
Lee Nipperacbf7c622010-05-19 19:19:33 +10002984 struct talitos_alg_template algt;
Kim Phillips9c4a7962008-06-23 19:50:15 +08002985};
2986
Jonas Eymann89d124c2016-04-19 20:33:47 +03002987static int talitos_init_common(struct talitos_ctx *ctx,
2988 struct talitos_crypto_alg *talitos_alg)
Kim Phillips9c4a7962008-06-23 19:50:15 +08002989{
Kim Phillips5228f0f2011-07-15 11:21:38 +08002990 struct talitos_private *priv;
Kim Phillips9c4a7962008-06-23 19:50:15 +08002991
2992 /* update context with ptr to dev */
2993 ctx->dev = talitos_alg->dev;
Kim Phillips19bbbc62009-03-29 15:53:59 +08002994
Kim Phillips5228f0f2011-07-15 11:21:38 +08002995 /* assign SEC channel to tfm in round-robin fashion */
2996 priv = dev_get_drvdata(ctx->dev);
2997 ctx->ch = atomic_inc_return(&priv->last_chan) &
2998 (priv->num_channels - 1);
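	/* the mask works because probe rejects non-power-of-two channel counts */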
2999
Kim Phillips9c4a7962008-06-23 19:50:15 +08003000 /* copy descriptor header template value */
Lee Nipperacbf7c622010-05-19 19:19:33 +10003001 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003002
Kim Phillips602dba52011-07-15 11:21:39 +08003003 /* select done notification */
3004 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3005
Lee Nipper497f2e62010-05-19 19:20:36 +10003006 return 0;
3007}
3008
Herbert Xuaeb4c132015-07-30 17:53:22 +08003009static int talitos_cra_init_aead(struct crypto_aead *tfm)
Lee Nipper497f2e62010-05-19 19:20:36 +10003010{
Jonas Eymann89d124c2016-04-19 20:33:47 +03003011 struct aead_alg *alg = crypto_aead_alg(tfm);
3012 struct talitos_crypto_alg *talitos_alg;
3013 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3014
3015 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3016 algt.alg.aead);
3017
3018 return talitos_init_common(ctx, talitos_alg);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003019}
3020
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003021static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm)
3022{
3023 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3024 struct talitos_crypto_alg *talitos_alg;
3025 struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm);
3026
3027 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3028 algt.alg.skcipher);
3029
3030 return talitos_init_common(ctx, talitos_alg);
3031}
3032
Lee Nipper497f2e62010-05-19 19:20:36 +10003033static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3034{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003035 struct crypto_alg *alg = tfm->__crt_alg;
3036 struct talitos_crypto_alg *talitos_alg;
Lee Nipper497f2e62010-05-19 19:20:36 +10003037 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3038
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003039 talitos_alg = container_of(__crypto_ahash_alg(alg),
3040 struct talitos_crypto_alg,
3041 algt.alg.hash);
Lee Nipper497f2e62010-05-19 19:20:36 +10003042
3043 ctx->keylen = 0;
3044 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3045 sizeof(struct talitos_ahash_req_ctx));
3046
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003047 return talitos_init_common(ctx, talitos_alg);
Lee Nipper497f2e62010-05-19 19:20:36 +10003048}
3049
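/* undo the DMA mapping of the key set up at setkey time, if any */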
LEROY Christophe2e13ce02017-10-06 15:05:02 +02003050static void talitos_cra_exit(struct crypto_tfm *tfm)
3051{
3052 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3053 struct device *dev = ctx->dev;
3054
3055 if (ctx->keylen)
3056 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3057}
3058
Kim Phillips9c4a7962008-06-23 19:50:15 +08003059/*
3060 * given the alg's descriptor header template, determine whether descriptor
3061 * type and primary/secondary execution units required match the hw
3062 * capabilities description provided in the device tree node.
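 *
 * For example, the "cbc-aes-talitos" template above is only usable if its
 * descriptor type bit is set in the node's fsl,descriptor-types-mask and
 * the AESU bit is set in fsl,exec-units-mask.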
3063 */
3064static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3065{
3066 struct talitos_private *priv = dev_get_drvdata(dev);
3067 int ret;
3068
3069 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3070 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3071
3072 if (SECONDARY_EU(desc_hdr_template))
3073 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3074 & priv->exec_units);
3075
3076 return ret;
3077}
3078
Grant Likely2dc11582010-08-06 09:25:50 -06003079static int talitos_remove(struct platform_device *ofdev)
Kim Phillips9c4a7962008-06-23 19:50:15 +08003080{
3081 struct device *dev = &ofdev->dev;
3082 struct talitos_private *priv = dev_get_drvdata(dev);
3083 struct talitos_crypto_alg *t_alg, *n;
3084 int i;
3085
3086 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
Lee Nipperacbf7c622010-05-19 19:19:33 +10003087 switch (t_alg->algt.type) {
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003088 case CRYPTO_ALG_TYPE_SKCIPHER:
3089 crypto_unregister_skcipher(&t_alg->algt.alg.skcipher);
Lee Nipperacbf7c622010-05-19 19:19:33 +10003090 break;
Herbert Xuaeb4c132015-07-30 17:53:22 +08003091 case CRYPTO_ALG_TYPE_AEAD:
3092 crypto_unregister_aead(&t_alg->algt.alg.aead);
Gustavo A. R. Silva5fc194e2019-09-09 00:29:52 -05003093 break;
Lee Nipperacbf7c622010-05-19 19:19:33 +10003094 case CRYPTO_ALG_TYPE_AHASH:
3095 crypto_unregister_ahash(&t_alg->algt.alg.hash);
3096 break;
3097 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08003098 list_del(&t_alg->entry);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003099 }
3100
3101 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3102 talitos_unregister_rng(dev);
3103
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003104 for (i = 0; i < 2; i++)
Kim Phillips2cdba3c2011-12-12 14:59:11 -06003105 if (priv->irq[i]) {
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003106 free_irq(priv->irq[i], dev);
3107 irq_dispose_mapping(priv->irq[i]);
3108 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08003109
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003110 tasklet_kill(&priv->done_task[0]);
Kim Phillips2cdba3c2011-12-12 14:59:11 -06003111 if (priv->irq[1])
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003112 tasklet_kill(&priv->done_task[1]);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003113
Kim Phillips9c4a7962008-06-23 19:50:15 +08003114 return 0;
3115}
3116
3117static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3118 struct talitos_alg_template
3119 *template)
3120{
Kim Phillips60f208d2010-05-19 19:21:53 +10003121 struct talitos_private *priv = dev_get_drvdata(dev);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003122 struct talitos_crypto_alg *t_alg;
3123 struct crypto_alg *alg;
3124
LEROY Christophe24b92ff2017-10-06 15:04:49 +02003125 t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3126 GFP_KERNEL);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003127 if (!t_alg)
3128 return ERR_PTR(-ENOMEM);
3129
Lee Nipperacbf7c622010-05-19 19:19:33 +10003130 t_alg->algt = *template;
3131
3132 switch (t_alg->algt.type) {
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003133 case CRYPTO_ALG_TYPE_SKCIPHER:
3134 alg = &t_alg->algt.alg.skcipher.base;
LEROY Christophe2e13ce02017-10-06 15:05:02 +02003135 alg->cra_exit = talitos_cra_exit;
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003136 t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher;
3137 t_alg->algt.alg.skcipher.setkey =
3138 t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
3139 t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
3140 t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
Lee Nipper497f2e62010-05-19 19:20:36 +10003141 break;
Lee Nipperacbf7c622010-05-19 19:19:33 +10003142 case CRYPTO_ALG_TYPE_AEAD:
Herbert Xuaeb4c132015-07-30 17:53:22 +08003143 alg = &t_alg->algt.alg.aead.base;
LEROY Christophe2e13ce02017-10-06 15:05:02 +02003144 alg->cra_exit = talitos_cra_exit;
Herbert Xuaeb4c132015-07-30 17:53:22 +08003145 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
Herbert Xuef7c5c82019-04-11 16:51:21 +08003146 t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3147 aead_setkey;
Herbert Xuaeb4c132015-07-30 17:53:22 +08003148 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3149 t_alg->algt.alg.aead.decrypt = aead_decrypt;
LEROY Christophe6cda0752017-10-06 15:04:39 +02003150 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3151 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
LEROY Christophe24b92ff2017-10-06 15:04:49 +02003152 devm_kfree(dev, t_alg);
LEROY Christophe6cda0752017-10-06 15:04:39 +02003153 return ERR_PTR(-ENOTSUPP);
3154 }
Lee Nipperacbf7c622010-05-19 19:19:33 +10003155 break;
3156 case CRYPTO_ALG_TYPE_AHASH:
3157 alg = &t_alg->algt.alg.hash.halg.base;
Lee Nipper497f2e62010-05-19 19:20:36 +10003158 alg->cra_init = talitos_cra_init_ahash;
LEROY Christophead4cd512018-02-26 17:40:04 +01003159 alg->cra_exit = talitos_cra_exit;
Kim Phillipsb286e002012-08-08 20:33:34 -05003160 t_alg->algt.alg.hash.init = ahash_init;
3161 t_alg->algt.alg.hash.update = ahash_update;
3162 t_alg->algt.alg.hash.final = ahash_final;
3163 t_alg->algt.alg.hash.finup = ahash_finup;
3164 t_alg->algt.alg.hash.digest = ahash_digest;
LEROY Christophe56136632017-09-12 11:03:39 +02003165 if (!strncmp(alg->cra_name, "hmac", 4))
3166 t_alg->algt.alg.hash.setkey = ahash_setkey;
Horia Geantă3639ca82016-04-21 19:24:55 +03003167		t_alg->algt.alg.hash.import = ahash_import;
3168 t_alg->algt.alg.hash.export = ahash_export;
Kim Phillipsb286e002012-08-08 20:33:34 -05003169
Lee Nipper79b3a412011-11-21 16:13:25 +08003170 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
Kim Phillips0b2730d2011-12-12 14:59:10 -06003171 !strncmp(alg->cra_name, "hmac", 4)) {
LEROY Christophe24b92ff2017-10-06 15:04:49 +02003172 devm_kfree(dev, t_alg);
Lee Nipper79b3a412011-11-21 16:13:25 +08003173 return ERR_PTR(-ENOTSUPP);
Kim Phillips0b2730d2011-12-12 14:59:10 -06003174 }
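		/*
		 * Hardware without SHA-224 init support: seed the state in
		 * software and run the MDEU in SHA-256 mode instead.
		 */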
Kim Phillips60f208d2010-05-19 19:21:53 +10003175 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
Lee Nipper79b3a412011-11-21 16:13:25 +08003176 (!strcmp(alg->cra_name, "sha224") ||
3177 !strcmp(alg->cra_name, "hmac(sha224)"))) {
Kim Phillips60f208d2010-05-19 19:21:53 +10003178 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3179 t_alg->algt.desc_hdr_template =
3180 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3181 DESC_HDR_SEL0_MDEUA |
3182 DESC_HDR_MODE0_MDEU_SHA256;
3183 }
Lee Nipper497f2e62010-05-19 19:20:36 +10003184 break;
Kim Phillips1d119112010-09-23 15:55:27 +08003185 default:
3186 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
LEROY Christophe24b92ff2017-10-06 15:04:49 +02003187 devm_kfree(dev, t_alg);
Kim Phillips1d119112010-09-23 15:55:27 +08003188 return ERR_PTR(-EINVAL);
Lee Nipperacbf7c622010-05-19 19:19:33 +10003189 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08003190
Kim Phillips9c4a7962008-06-23 19:50:15 +08003191 alg->cra_module = THIS_MODULE;
LEROY Christopheb0057762016-06-06 13:20:44 +02003192 if (t_alg->algt.priority)
3193 alg->cra_priority = t_alg->algt.priority;
3194 else
3195 alg->cra_priority = TALITOS_CRA_PRIORITY;
Christophe Leroyc9cca702019-05-21 13:34:18 +00003196 if (has_ftr_sec1(priv))
3197 alg->cra_alignmask = 3;
3198 else
3199 alg->cra_alignmask = 0;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003200 alg->cra_ctxsize = sizeof(struct talitos_ctx);
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01003201 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003202
Kim Phillips9c4a7962008-06-23 19:50:15 +08003203 t_alg->dev = dev;
3204
3205 return t_alg;
3206}
3207
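/*
 * Map and request the SEC interrupt(s).  SEC1 uses a single IRQ for all
 * channels; SEC2+ optionally exposes two lines, one serving channels 0/2
 * and one serving channels 1/3.
 */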
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003208static int talitos_probe_irq(struct platform_device *ofdev)
3209{
3210 struct device *dev = &ofdev->dev;
3211 struct device_node *np = ofdev->dev.of_node;
3212 struct talitos_private *priv = dev_get_drvdata(dev);
3213 int err;
LEROY Christophedd3c0982015-04-17 16:32:13 +02003214 bool is_sec1 = has_ftr_sec1(priv);
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003215
3216 priv->irq[0] = irq_of_parse_and_map(np, 0);
Kim Phillips2cdba3c2011-12-12 14:59:11 -06003217 if (!priv->irq[0]) {
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003218 dev_err(dev, "failed to map irq\n");
3219 return -EINVAL;
3220 }
LEROY Christophedd3c0982015-04-17 16:32:13 +02003221 if (is_sec1) {
3222 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3223 dev_driver_string(dev), dev);
3224 goto primary_out;
3225 }
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003226
3227 priv->irq[1] = irq_of_parse_and_map(np, 1);
3228
3229 /* get the primary irq line */
Kim Phillips2cdba3c2011-12-12 14:59:11 -06003230 if (!priv->irq[1]) {
LEROY Christophedd3c0982015-04-17 16:32:13 +02003231 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003232 dev_driver_string(dev), dev);
3233 goto primary_out;
3234 }
3235
LEROY Christophedd3c0982015-04-17 16:32:13 +02003236 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003237 dev_driver_string(dev), dev);
3238 if (err)
3239 goto primary_out;
3240
3241 /* get the secondary irq line */
LEROY Christophedd3c0982015-04-17 16:32:13 +02003242 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003243 dev_driver_string(dev), dev);
3244 if (err) {
3245 dev_err(dev, "failed to request secondary irq\n");
3246 irq_dispose_mapping(priv->irq[1]);
Kim Phillips2cdba3c2011-12-12 14:59:11 -06003247 priv->irq[1] = 0;
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003248 }
3249
3250 return err;
3251
3252primary_out:
3253 if (err) {
3254 dev_err(dev, "failed to request primary irq\n");
3255 irq_dispose_mapping(priv->irq[0]);
Kim Phillips2cdba3c2011-12-12 14:59:11 -06003256 priv->irq[0] = 0;
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003257 }
3258
3259 return err;
3260}
3261
Grant Likely1c48a5c2011-02-17 02:43:24 -07003262static int talitos_probe(struct platform_device *ofdev)
Kim Phillips9c4a7962008-06-23 19:50:15 +08003263{
3264 struct device *dev = &ofdev->dev;
Grant Likely61c7a082010-04-13 16:12:29 -07003265 struct device_node *np = ofdev->dev.of_node;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003266 struct talitos_private *priv;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003267 int i, err;
LEROY Christophe5fa7fa12015-04-17 16:32:11 +02003268 int stride;
LEROY Christophefd5ea7f2017-10-06 15:04:53 +02003269 struct resource *res;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003270
LEROY Christophe24b92ff2017-10-06 15:04:49 +02003271 priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003272 if (!priv)
3273 return -ENOMEM;
3274
Kevin Haof3de9cb2014-01-28 20:17:23 +08003275 INIT_LIST_HEAD(&priv->alg_list);
3276
Kim Phillips9c4a7962008-06-23 19:50:15 +08003277 dev_set_drvdata(dev, priv);
3278
3279 priv->ofdev = ofdev;
3280
Horia Geanta511d63c2012-03-30 17:49:53 +03003281 spin_lock_init(&priv->reg_lock);
3282
LEROY Christophefd5ea7f2017-10-06 15:04:53 +02003283 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3284 if (!res)
3285 return -ENXIO;
3286 priv->reg = devm_ioremap(dev, res->start, resource_size(res));
Kim Phillips9c4a7962008-06-23 19:50:15 +08003287 if (!priv->reg) {
        3288		dev_err(dev, "failed to ioremap registers\n");
3289 err = -ENOMEM;
3290 goto err_out;
3291 }
3292
3293 /* get SEC version capabilities from device tree */
LEROY Christophefa14c6c2017-10-06 15:04:51 +02003294 of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3295 of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3296 of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3297 of_property_read_u32(np, "fsl,descriptor-types-mask",
3298 &priv->desc_types);
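	/*
	 * These come from the SoC device tree, e.g. (illustrative values only;
	 * reg and interrupts omitted):
	 *
	 *	crypto@30000 {
	 *		compatible = "fsl,sec2.0";
	 *		fsl,num-channels = <4>;
	 *		fsl,channel-fifo-len = <24>;
	 *		fsl,exec-units-mask = <0xfe>;
	 *		fsl,descriptor-types-mask = <0x12b0ebf>;
	 *	};
	 */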
Kim Phillips9c4a7962008-06-23 19:50:15 +08003299
3300 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3301 !priv->exec_units || !priv->desc_types) {
3302 dev_err(dev, "invalid property data in device tree node\n");
3303 err = -EINVAL;
3304 goto err_out;
3305 }
3306
Lee Nipperf3c85bc2008-07-30 16:26:57 +08003307 if (of_device_is_compatible(np, "fsl,sec3.0"))
3308 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3309
Kim Phillipsfe5720e2008-10-12 20:33:14 +08003310 if (of_device_is_compatible(np, "fsl,sec2.1"))
Kim Phillips60f208d2010-05-19 19:21:53 +10003311 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
Lee Nipper79b3a412011-11-21 16:13:25 +08003312 TALITOS_FTR_SHA224_HWINIT |
3313 TALITOS_FTR_HMAC_OK;
Kim Phillipsfe5720e2008-10-12 20:33:14 +08003314
LEROY Christophe21590882015-04-17 16:32:05 +02003315 if (of_device_is_compatible(np, "fsl,sec1.0"))
3316 priv->features |= TALITOS_FTR_SEC1;
3317
LEROY Christophe5fa7fa12015-04-17 16:32:11 +02003318 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3319 priv->reg_deu = priv->reg + TALITOS12_DEU;
3320 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3321 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3322 stride = TALITOS1_CH_STRIDE;
3323 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3324 priv->reg_deu = priv->reg + TALITOS10_DEU;
3325 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3326 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3327 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3328 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3329 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3330 stride = TALITOS1_CH_STRIDE;
3331 } else {
3332 priv->reg_deu = priv->reg + TALITOS2_DEU;
3333 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3334 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3335 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3336 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3337 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3338 priv->reg_keu = priv->reg + TALITOS2_KEU;
3339 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3340 stride = TALITOS2_CH_STRIDE;
3341 }
3342
LEROY Christophedd3c0982015-04-17 16:32:13 +02003343 err = talitos_probe_irq(ofdev);
3344 if (err)
3345 goto err_out;
3346
Christophe Leroyc8c74642019-06-17 21:14:45 +00003347 if (has_ftr_sec1(priv)) {
LEROY Christophe9c02e282017-10-06 15:04:55 +02003348 if (priv->num_channels == 1)
3349 tasklet_init(&priv->done_task[0], talitos1_done_ch0,
LEROY Christophedd3c0982015-04-17 16:32:13 +02003350 (unsigned long)dev);
LEROY Christophe9c02e282017-10-06 15:04:55 +02003351 else
3352 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3353 (unsigned long)dev);
3354 } else {
3355 if (priv->irq[1]) {
LEROY Christophedd3c0982015-04-17 16:32:13 +02003356 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3357 (unsigned long)dev);
3358 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3359 (unsigned long)dev);
LEROY Christophe9c02e282017-10-06 15:04:55 +02003360 } else if (priv->num_channels == 1) {
3361 tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3362 (unsigned long)dev);
3363 } else {
3364 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3365 (unsigned long)dev);
LEROY Christophedd3c0982015-04-17 16:32:13 +02003366 }
3367 }
3368
Kees Cooka86854d2018-06-12 14:07:58 -07003369 priv->chan = devm_kcalloc(dev,
3370 priv->num_channels,
3371 sizeof(struct talitos_channel),
3372 GFP_KERNEL);
Kim Phillips4b9926282009-08-13 11:50:38 +10003373 if (!priv->chan) {
3374 dev_err(dev, "failed to allocate channel management space\n");
Kim Phillips9c4a7962008-06-23 19:50:15 +08003375 err = -ENOMEM;
3376 goto err_out;
3377 }
3378
Martin Hicksf641ddd2015-03-03 08:21:33 -05003379 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3380
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003381 for (i = 0; i < priv->num_channels; i++) {
LEROY Christophe5fa7fa12015-04-17 16:32:11 +02003382 priv->chan[i].reg = priv->reg + stride * (i + 1);
Kim Phillips2cdba3c2011-12-12 14:59:11 -06003383 if (!priv->irq[1] || !(i & 1))
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003384 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
Kim Phillipsad42d5f2011-11-21 16:13:27 +08003385
Kim Phillips4b9926282009-08-13 11:50:38 +10003386 spin_lock_init(&priv->chan[i].head_lock);
3387 spin_lock_init(&priv->chan[i].tail_lock);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003388
Kees Cooka86854d2018-06-12 14:07:58 -07003389 priv->chan[i].fifo = devm_kcalloc(dev,
3390 priv->fifo_len,
3391 sizeof(struct talitos_request),
3392 GFP_KERNEL);
Kim Phillips4b9926282009-08-13 11:50:38 +10003393 if (!priv->chan[i].fifo) {
Kim Phillips9c4a7962008-06-23 19:50:15 +08003394 dev_err(dev, "failed to allocate request fifo %d\n", i);
3395 err = -ENOMEM;
3396 goto err_out;
3397 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08003398
Kim Phillips4b9926282009-08-13 11:50:38 +10003399 atomic_set(&priv->chan[i].submit_count,
3400 -(priv->chfifo_len - 1));
Martin Hicksf641ddd2015-03-03 08:21:33 -05003401 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08003402
Kim Phillips81eb0242009-08-13 11:51:51 +10003403 dma_set_mask(dev, DMA_BIT_MASK(36));
3404
Kim Phillips9c4a7962008-06-23 19:50:15 +08003405 /* reset and initialize the h/w */
3406 err = init_device(dev);
3407 if (err) {
3408 dev_err(dev, "failed to initialize device\n");
3409 goto err_out;
3410 }
3411
3412 /* register the RNG, if available */
3413 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3414 err = talitos_register_rng(dev);
3415 if (err) {
3416 dev_err(dev, "failed to register hwrng: %d\n", err);
3417 goto err_out;
3418 } else
3419 dev_info(dev, "hwrng\n");
3420 }
3421
3422 /* register crypto algorithms the device supports */
Kim Phillips9c4a7962008-06-23 19:50:15 +08003423 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3424 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3425 struct talitos_crypto_alg *t_alg;
Herbert Xuaeb4c132015-07-30 17:53:22 +08003426 struct crypto_alg *alg = NULL;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003427
3428 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3429 if (IS_ERR(t_alg)) {
3430 err = PTR_ERR(t_alg);
Kim Phillips0b2730d2011-12-12 14:59:10 -06003431 if (err == -ENOTSUPP)
Lee Nipper79b3a412011-11-21 16:13:25 +08003432 continue;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003433 goto err_out;
3434 }
3435
Lee Nipperacbf7c622010-05-19 19:19:33 +10003436 switch (t_alg->algt.type) {
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003437 case CRYPTO_ALG_TYPE_SKCIPHER:
3438 err = crypto_register_skcipher(
3439 &t_alg->algt.alg.skcipher);
3440 alg = &t_alg->algt.alg.skcipher.base;
Lee Nipperacbf7c622010-05-19 19:19:33 +10003441 break;
Herbert Xuaeb4c132015-07-30 17:53:22 +08003442
3443 case CRYPTO_ALG_TYPE_AEAD:
3444 err = crypto_register_aead(
3445 &t_alg->algt.alg.aead);
3446 alg = &t_alg->algt.alg.aead.base;
3447 break;
3448
Lee Nipperacbf7c622010-05-19 19:19:33 +10003449 case CRYPTO_ALG_TYPE_AHASH:
3450 err = crypto_register_ahash(
3451 &t_alg->algt.alg.hash);
Herbert Xuaeb4c132015-07-30 17:53:22 +08003452 alg = &t_alg->algt.alg.hash.halg.base;
Lee Nipperacbf7c622010-05-19 19:19:33 +10003453 break;
3454 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08003455 if (err) {
3456 dev_err(dev, "%s alg registration failed\n",
Herbert Xuaeb4c132015-07-30 17:53:22 +08003457 alg->cra_driver_name);
LEROY Christophe24b92ff2017-10-06 15:04:49 +02003458 devm_kfree(dev, t_alg);
Horia Geanta991155b2013-03-20 16:31:38 +02003459 } else
Kim Phillips9c4a7962008-06-23 19:50:15 +08003460 list_add_tail(&t_alg->entry, &priv->alg_list);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003461 }
3462 }
Kim Phillips5b859b6e2011-11-21 16:13:26 +08003463 if (!list_empty(&priv->alg_list))
3464 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3465 (char *)of_get_property(np, "compatible", NULL));
Kim Phillips9c4a7962008-06-23 19:50:15 +08003466
3467 return 0;
3468
3469err_out:
3470 talitos_remove(ofdev);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003471
3472 return err;
3473}
3474
Márton Németh6c3f9752010-01-17 21:54:01 +11003475static const struct of_device_id talitos_match[] = {
LEROY Christophe0635b7db2015-04-17 16:32:20 +02003476#ifdef CONFIG_CRYPTO_DEV_TALITOS1
3477 {
3478 .compatible = "fsl,sec1.0",
3479 },
3480#endif
3481#ifdef CONFIG_CRYPTO_DEV_TALITOS2
Kim Phillips9c4a7962008-06-23 19:50:15 +08003482 {
3483 .compatible = "fsl,sec2.0",
3484 },
LEROY Christophe0635b7db2015-04-17 16:32:20 +02003485#endif
Kim Phillips9c4a7962008-06-23 19:50:15 +08003486 {},
3487};
3488MODULE_DEVICE_TABLE(of, talitos_match);
3489
Grant Likely1c48a5c2011-02-17 02:43:24 -07003490static struct platform_driver talitos_driver = {
Grant Likely40182942010-04-13 16:13:02 -07003491 .driver = {
3492 .name = "talitos",
Grant Likely40182942010-04-13 16:13:02 -07003493 .of_match_table = talitos_match,
3494 },
Kim Phillips9c4a7962008-06-23 19:50:15 +08003495 .probe = talitos_probe,
Al Viro596f1032008-11-22 17:34:24 +00003496 .remove = talitos_remove,
Kim Phillips9c4a7962008-06-23 19:50:15 +08003497};
3498
Axel Lin741e8c22011-11-26 21:26:19 +08003499module_platform_driver(talitos_driver);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003500
3501MODULE_LICENSE("GPL");
3502MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3503MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");