// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/internal/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

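/*
 * Hardware descriptor pointer helpers.
 *
 * As the accessors below show, SEC1-style pointers carry only a 32-bit bus
 * address and a 16-bit length (len1), while SEC2+ style pointers also carry
 * an extended-address byte (eptr, used for 36-bit addressing) and a
 * j_extent field used for link-table flags.
 */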
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   unsigned int len, bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (is_sec1) {
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
		ptr->eptr = upper_32_bits(dma_addr);
	}
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (is_sec1) {
		dst_ptr->len1 = src_ptr->len1;
	} else {
		dst_ptr->len = src_ptr->len;
		dst_ptr->eptr = src_ptr->eptr;
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void __map_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     unsigned int len, void *data,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}

static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
}

static void map_single_talitos_ptr_nosync(struct device *dev,
					  struct talitos_ptr *ptr,
					  unsigned int len, void *data,
					  enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir,
				 DMA_ATTR_SKIP_CPU_SYNC);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
			  void (*callback)(struct device *dev,
					   struct talitos_desc *desc,
					   void *context, int error),
			  void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
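
/*
 * Illustrative only (not part of the original driver text): a caller is
 * expected to submit a fully dma-mapped descriptor and treat -EINPROGRESS
 * as the asynchronous success case, e.g.
 *
 *	err = talitos_submit(dev, ctx->ch, &edesc->desc, my_done_cb, req);
 *	if (err != -EINPROGRESS) {
 *		// -EAGAIN means the channel fifo was full; unmap and bail
 *	}
 *
 * where "my_done_cb" is a hypothetical name used only for this sketch;
 * real callbacks in this file (e.g. ipsec_esp_encrypt_done) check the
 * descriptor header done/error feedback and free the edesc.
 */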
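/*
 * On SEC1 the submitted header lives in hdr1, and when a request was split
 * into chained descriptors (next_desc set) the header of interest is the
 * one of the chained descriptor stored past the link table in edesc->buf,
 * as the helper below shows.
 */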
static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
{
	struct talitos_edesc *edesc;

	if (!is_sec1)
		return request->desc->hdr;

	if (!request->desc->next_desc)
		return request->desc->hdr1;

	edesc = container_of(request->desc, struct talitos_edesc, desc);

	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
}

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = get_request_hdr(request, is_sec1);

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static __be32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
	       priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) {
		struct talitos_edesc *edesc;

		edesc = container_of(priv->chan[ch].fifo[iter].desc,
				     struct talitos_edesc, desc);
		return ((struct talitos_desc *)
			(edesc->buf + edesc->dma_len))->hdr;
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF));

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE;	\
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo)) {			\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE;	\
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
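
/*
 * Interrupt handling is split in two stages: the handlers generated above
 * acknowledge and mask the "done" interrupts, then schedule the matching
 * talitos1_done_xxx / talitos2_done_xxx tasklet, which flushes the
 * completed channels and unmasks the done interrupts again.
 */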

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev);
	priv->rng.init		= talitos_rng_init;
	priv->rng.data_present	= talitos_rng_data_present;
	priv->rng.data_read	= talitos_rng_data_read;
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#else
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
#endif
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

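/*
 * Per-request ahash state.  The buf[2]/buf_idx pair appears to act as a
 * double buffer for data held back between updates (tracked via nbuf and
 * to_hash_later), while hw_context caches the MDEU hash context between
 * descriptors; the struct below and talitos_export_state mirror each
 * other for export/import.
 */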
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[2][HASH_MAX_BLOCK_SIZE];
	int buf_idx;
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};

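/*
 * authenc (AEAD) keys arrive as one blob and are split by
 * crypto_authenc_extractkeys(); the two setkey handlers below store them
 * back to back in ctx->key (authentication key followed by encryption key)
 * and keep a single DMA mapping (ctx->dma_key) that is released on rekey.
 */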
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int aead_des3_setkey(struct crypto_aead *authenc,
			    const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = -EINVAL;
	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto out;

	err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
	if (err)
		goto out;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
			 cryptlen + authsize, areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + cryptlen - ivsize);
	}
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq, true);

	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	kfree(edesc);

	aead_request_complete(areq, err);
}

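/*
 * Software ICV check: both copies of the ICV end up at the tail of the
 * edesc buffer (one at edesc->buf + dma_len, the other authsize bytes
 * before it) and are compared in constant time with crypto_memneq() below.
 */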
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	if (!err) {
		/* auth check */
		oicv = edesc->buf + edesc->dma_len;
		icv = oicv - authsize;

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int datalen, int elen,
				 struct talitos_ptr *link_tbl_ptr, int align)
{
	int n_sg = elen ? sg_count + 1 : sg_count;
	int count = 0;
	int cryptlen = datalen + elen;
	int padding = ALIGN(cryptlen, align) - cryptlen;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		if (datalen > 0 && len > datalen) {
			to_talitos_ptr(link_tbl_ptr + count,
				       sg_dma_address(sg) + offset, datalen, 0);
			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
			count++;
			len -= datalen;
			offset += datalen;
		}
		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		datalen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RET, 0);

	return count;
}
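
/*
 * The table built above is the SEC "link table" (gather/scatter) format:
 * one talitos_ptr per contiguous DMA segment, with the last entry tagged
 * DESC_PTR_LNKTBL_RET.  talitos_sg_map_ext() below decides whether a
 * descriptor pointer can reference the data directly or must point at
 * such a table (tagged with DESC_PTR_LNKTBL_JUMP).
 */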
1143
LEROY Christophe2b122732018-03-22 10:57:01 +01001144static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1145 unsigned int len, struct talitos_edesc *edesc,
1146 struct talitos_ptr *ptr, int sg_count,
Christophe Leroye3451772019-05-21 13:34:19 +00001147 unsigned int offset, int tbl_off, int elen,
Christophe Leroy416b8462021-01-20 18:57:24 +00001148 bool force, int align)
LEROY Christophe246a87c2016-06-06 13:20:36 +02001149{
LEROY Christophe246a87c2016-06-06 13:20:36 +02001150 struct talitos_private *priv = dev_get_drvdata(dev);
1151 bool is_sec1 = has_ftr_sec1(priv);
Christophe Leroy416b8462021-01-20 18:57:24 +00001152 int aligned_len = ALIGN(len, align);
LEROY Christophe246a87c2016-06-06 13:20:36 +02001153
LEROY Christophe87a81dc2018-01-26 17:09:59 +01001154 if (!src) {
1155 to_talitos_ptr(ptr, 0, 0, is_sec1);
1156 return 1;
1157 }
LEROY Christophe2b122732018-03-22 10:57:01 +01001158 to_talitos_ptr_ext_set(ptr, elen, is_sec1);
Christophe Leroye3451772019-05-21 13:34:19 +00001159 if (sg_count == 1 && !force) {
Christophe Leroy416b8462021-01-20 18:57:24 +00001160 to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1);
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001161 return sg_count;
LEROY Christophe246a87c2016-06-06 13:20:36 +02001162 }
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001163 if (is_sec1) {
Christophe Leroy416b8462021-01-20 18:57:24 +00001164 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1);
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001165 return sg_count;
1166 }
Christophe Leroye3451772019-05-21 13:34:19 +00001167 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
Christophe Leroy416b8462021-01-20 18:57:24 +00001168 &edesc->link_tbl[tbl_off], align);
Christophe Leroye3451772019-05-21 13:34:19 +00001169 if (sg_count == 1 && !force) {
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001170 /* Only one segment now, so no link tbl needed*/
1171 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1172 return sg_count;
1173 }
1174 to_talitos_ptr(ptr, edesc->dma_link_tbl +
Christophe Leroy416b8462021-01-20 18:57:24 +00001175 tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1);
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001176 to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1177
LEROY Christophe246a87c2016-06-06 13:20:36 +02001178 return sg_count;
1179}
1180
LEROY Christophe2b122732018-03-22 10:57:01 +01001181static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1182 unsigned int len, struct talitos_edesc *edesc,
1183 struct talitos_ptr *ptr, int sg_count,
1184 unsigned int offset, int tbl_off)
1185{
1186 return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
Christophe Leroy416b8462021-01-20 18:57:24 +00001187 tbl_off, 0, false, 1);
LEROY Christophe2b122732018-03-22 10:57:01 +01001188}
1189
Kim Phillips9c4a7962008-06-23 19:50:15 +08001190/*
1191 * fill in and submit ipsec_esp descriptor
1192 */
Lee Nipper56af8cd2009-03-29 15:50:50 +08001193static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001194 bool encrypt,
Herbert Xuaeb4c132015-07-30 17:53:22 +08001195 void (*callback)(struct device *dev,
1196 struct talitos_desc *desc,
1197 void *context, int error))
Kim Phillips9c4a7962008-06-23 19:50:15 +08001198{
1199 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001200 unsigned int authsize = crypto_aead_authsize(aead);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001201 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1202 struct device *dev = ctx->dev;
1203 struct talitos_desc *desc = &edesc->desc;
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001204 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
Kim Phillipse41256f2009-08-13 11:49:06 +10001205 unsigned int ivsize = crypto_aead_ivsize(aead);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001206 int tbl_off = 0;
Kim Phillipsfa86a262008-07-17 20:20:06 +08001207 int sg_count, ret;
LEROY Christophe2b122732018-03-22 10:57:01 +01001208 int elen = 0;
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001209 bool sync_needed = false;
1210 struct talitos_private *priv = dev_get_drvdata(dev);
1211 bool is_sec1 = has_ftr_sec1(priv);
LEROY Christophe9a655602017-10-06 15:04:59 +02001212 bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1213 struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1214 struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
Christophe Leroye3451772019-05-21 13:34:19 +00001215 dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001216
1217 /* hmac key */
LEROY Christophe2e13ce02017-10-06 15:05:02 +02001218 to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001219
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001220 sg_count = edesc->src_nents ?: 1;
1221 if (is_sec1 && sg_count > 1)
1222 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1223 areq->assoclen + cryptlen);
1224 else
1225 sg_count = dma_map_sg(dev, areq->src, sg_count,
1226 (areq->src == areq->dst) ?
1227 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1228
Kim Phillips9c4a7962008-06-23 19:50:15 +08001229 /* hmac data */
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001230 ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1231 &desc->ptr[1], sg_count, 0, tbl_off);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001232
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001233 if (ret > 1) {
Horia Geantă340ff602016-04-19 20:33:48 +03001234		tbl_off += ret;
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001235 sync_needed = true;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001236 }
1237
Kim Phillips9c4a7962008-06-23 19:50:15 +08001238 /* cipher iv */
LEROY Christophe9a655602017-10-06 15:04:59 +02001239 to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001240
1241 /* cipher key */
LEROY Christophe2e13ce02017-10-06 15:05:02 +02001242 to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
1243 ctx->enckeylen, is_sec1);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001244
1245 /*
1246 * cipher in
1247 * map and adjust cipher len to aead request cryptlen.
1248	 * extent is the number of HMAC bytes appended to the ciphertext,
1249	 * typically 12 for ipsec
1250 */
LEROY Christophe2b122732018-03-22 10:57:01 +01001251 if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1252 elen = authsize;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001253
LEROY Christophe2b122732018-03-22 10:57:01 +01001254 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
Christophe Leroye3451772019-05-21 13:34:19 +00001255 sg_count, areq->assoclen, tbl_off, elen,
Christophe Leroy416b8462021-01-20 18:57:24 +00001256 false, 1);
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001257
LEROY Christopheec8c7d12017-10-06 15:04:33 +02001258 if (ret > 1) {
1259 tbl_off += ret;
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001260 sync_needed = true;
Horia Geantă340ff602016-04-19 20:33:48 +03001261	}
Kim Phillips9c4a7962008-06-23 19:50:15 +08001262
1263 /* cipher out */
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001264 if (areq->src != areq->dst) {
1265 sg_count = edesc->dst_nents ? : 1;
1266 if (!is_sec1 || sg_count == 1)
1267 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1268 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08001269
Christophe Leroye3451772019-05-21 13:34:19 +00001270 if (is_ipsec_esp && encrypt)
1271 elen = authsize;
1272 else
1273 elen = 0;
1274 ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1275 sg_count, areq->assoclen, tbl_off, elen,
Christophe Leroy416b8462021-01-20 18:57:24 +00001276 is_ipsec_esp && !encrypt, 1);
Christophe Leroye3451772019-05-21 13:34:19 +00001277 tbl_off += ret;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001278
Christophe Leroye3451772019-05-21 13:34:19 +00001279 if (!encrypt && is_ipsec_esp) {
1280 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1281
1282 /* Add an entry to the link table for ICV data */
1283 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1284 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1285
1286 /* icv data follows link tables */
1287 to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001288 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001289 sync_needed = true;
Christophe Leroye3451772019-05-21 13:34:19 +00001290 } else if (!encrypt) {
1291 to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1292 sync_needed = true;
LEROY Christophe9a655602017-10-06 15:04:59 +02001293 } else if (!is_ipsec_esp) {
Christophe Leroye3451772019-05-21 13:34:19 +00001294 talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1295 sg_count, areq->assoclen + cryptlen, tbl_off);
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001296 }
1297
Kim Phillips9c4a7962008-06-23 19:50:15 +08001298 /* iv out */
LEROY Christophe9a655602017-10-06 15:04:59 +02001299 if (is_ipsec_esp)
LEROY Christophe549bd8b2016-06-06 13:20:40 +02001300 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1301 DMA_FROM_DEVICE);
1302
1303 if (sync_needed)
1304 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1305 edesc->dma_len,
1306 DMA_BIDIRECTIONAL);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001307
Kim Phillips5228f0f2011-07-15 11:21:38 +08001308 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
Kim Phillipsfa86a262008-07-17 20:20:06 +08001309 if (ret != -EINPROGRESS) {
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001310 ipsec_esp_unmap(dev, edesc, areq, encrypt);
Kim Phillipsfa86a262008-07-17 20:20:06 +08001311 kfree(edesc);
1312 }
1313 return ret;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001314}
1315
Kim Phillips9c4a7962008-06-23 19:50:15 +08001316/*
Lee Nipper56af8cd2009-03-29 15:50:50 +08001317 * allocate and map the extended descriptor
Kim Phillips9c4a7962008-06-23 19:50:15 +08001318 */
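/*
 * Added layout note (a reading of the allocation below, not a guarantee):
 * the single buffer holds the struct talitos_edesc itself, then dma_len
 * bytes of link tables (or the SEC1 linear bounce buffer) including room
 * for the generated ICV, then a stashed ICV when icv_stashing is set, an
 * optional second descriptor for SEC1 ahash requests, and finally a copy
 * of the IV at the very end.
 */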
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001319static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1320 struct scatterlist *src,
1321 struct scatterlist *dst,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001322 u8 *iv,
1323 unsigned int assoclen,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001324 unsigned int cryptlen,
1325 unsigned int authsize,
Horia Geanta79fd31d2012-08-02 17:16:40 +03001326 unsigned int ivsize,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001327 int icv_stashing,
Horia Geanta62293a32013-11-28 15:11:17 +02001328 u32 cryptoflags,
1329 bool encrypt)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001330{
Lee Nipper56af8cd2009-03-29 15:50:50 +08001331 struct talitos_edesc *edesc;
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001332 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001333 dma_addr_t iv_dma = 0;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001334 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
Kim Phillips586725f2008-07-17 20:19:18 +08001335 GFP_ATOMIC;
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001336 struct talitos_private *priv = dev_get_drvdata(dev);
1337 bool is_sec1 = has_ftr_sec1(priv);
1338 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001339
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001340 if (cryptlen + authsize > max_len) {
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001341 dev_err(dev, "length exceeds h/w max limit\n");
Kim Phillips9c4a7962008-06-23 19:50:15 +08001342 return ERR_PTR(-EINVAL);
1343 }
1344
Horia Geanta62293a32013-11-28 15:11:17 +02001345 if (!dst || dst == src) {
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001346 src_len = assoclen + cryptlen + authsize;
1347 src_nents = sg_nents_for_len(src, src_len);
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001348 if (src_nents < 0) {
1349 dev_err(dev, "Invalid number of src SG.\n");
Christophe Leroyc56c2e12019-01-08 06:56:46 +00001350 return ERR_PTR(-EINVAL);
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001351 }
Horia Geanta62293a32013-11-28 15:11:17 +02001352 src_nents = (src_nents == 1) ? 0 : src_nents;
1353 dst_nents = dst ? src_nents : 0;
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001354 dst_len = 0;
Horia Geanta62293a32013-11-28 15:11:17 +02001355	} else { /* dst && dst != src */
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001356 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1357 src_nents = sg_nents_for_len(src, src_len);
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001358 if (src_nents < 0) {
1359 dev_err(dev, "Invalid number of src SG.\n");
Christophe Leroyc56c2e12019-01-08 06:56:46 +00001360 return ERR_PTR(-EINVAL);
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001361 }
Horia Geanta62293a32013-11-28 15:11:17 +02001362 src_nents = (src_nents == 1) ? 0 : src_nents;
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001363 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1364 dst_nents = sg_nents_for_len(dst, dst_len);
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001365 if (dst_nents < 0) {
1366 dev_err(dev, "Invalid number of dst SG.\n");
Christophe Leroyc56c2e12019-01-08 06:56:46 +00001367 return ERR_PTR(-EINVAL);
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001368 }
Horia Geanta62293a32013-11-28 15:11:17 +02001369 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001370 }
1371
1372 /*
1373 * allocate space for base edesc plus the link tables,
Herbert Xuaeb4c132015-07-30 17:53:22 +08001374 * allowing for two separate entries for AD and generated ICV (+ 2),
1375 * and space for two sets of ICVs (stashed and generated)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001376 */
Lee Nipper56af8cd2009-03-29 15:50:50 +08001377 alloc_len = sizeof(struct talitos_edesc);
Christophe Leroye3451772019-05-21 13:34:19 +00001378 if (src_nents || dst_nents || !encrypt) {
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001379 if (is_sec1)
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001380 dma_len = (src_nents ? src_len : 0) +
Christophe Leroye3451772019-05-21 13:34:19 +00001381 (dst_nents ? dst_len : 0) + authsize;
LEROY Christophe6f65f6a2015-04-17 16:32:15 +02001382 else
Herbert Xuaeb4c132015-07-30 17:53:22 +08001383 dma_len = (src_nents + dst_nents + 2) *
Christophe Leroye3451772019-05-21 13:34:19 +00001384 sizeof(struct talitos_ptr) + authsize;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001385 alloc_len += dma_len;
1386 } else {
1387 dma_len = 0;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001388 }
Christophe Leroye3451772019-05-21 13:34:19 +00001389 alloc_len += icv_stashing ? authsize : 0;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001390
LEROY Christophe37b5e882017-10-06 15:05:06 +02001391	/* if it's an ahash, add space for a second desc next to the first one */
1392 if (is_sec1 && !dst)
1393 alloc_len += sizeof(struct talitos_desc);
Christophe Leroy1bea4452019-01-08 06:56:48 +00001394 alloc_len += ivsize;
LEROY Christophe37b5e882017-10-06 15:05:06 +02001395
Kim Phillips586725f2008-07-17 20:19:18 +08001396 edesc = kmalloc(alloc_len, GFP_DMA | flags);
Christophe Leroyc56c2e12019-01-08 06:56:46 +00001397 if (!edesc)
1398 return ERR_PTR(-ENOMEM);
Christophe Leroy1bea4452019-01-08 06:56:48 +00001399 if (ivsize) {
1400 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
Christophe Leroyc56c2e12019-01-08 06:56:46 +00001401 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
Christophe Leroy1bea4452019-01-08 06:56:48 +00001402 }
LEROY Christophee4a647c2017-10-06 15:04:45 +02001403 memset(&edesc->desc, 0, sizeof(edesc->desc));
Kim Phillips9c4a7962008-06-23 19:50:15 +08001404
1405 edesc->src_nents = src_nents;
1406 edesc->dst_nents = dst_nents;
Horia Geanta79fd31d2012-08-02 17:16:40 +03001407 edesc->iv_dma = iv_dma;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001408 edesc->dma_len = dma_len;
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001409 if (dma_len)
1410 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
Lee Nipper497f2e62010-05-19 19:20:36 +10001411 edesc->dma_len,
1412 DMA_BIDIRECTIONAL);
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001413
Kim Phillips9c4a7962008-06-23 19:50:15 +08001414 return edesc;
1415}
1416
Horia Geanta79fd31d2012-08-02 17:16:40 +03001417static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
Horia Geanta62293a32013-11-28 15:11:17 +02001418 int icv_stashing, bool encrypt)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001419{
1420 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001421 unsigned int authsize = crypto_aead_authsize(authenc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001422 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Horia Geanta79fd31d2012-08-02 17:16:40 +03001423 unsigned int ivsize = crypto_aead_ivsize(authenc);
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001424 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001425
Herbert Xuaeb4c132015-07-30 17:53:22 +08001426 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001427 iv, areq->assoclen, cryptlen,
Herbert Xuaeb4c132015-07-30 17:53:22 +08001428 authsize, ivsize, icv_stashing,
Horia Geanta62293a32013-11-28 15:11:17 +02001429 areq->base.flags, encrypt);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001430}
1431
Lee Nipper56af8cd2009-03-29 15:50:50 +08001432static int aead_encrypt(struct aead_request *req)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001433{
1434 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1435 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Lee Nipper56af8cd2009-03-29 15:50:50 +08001436 struct talitos_edesc *edesc;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001437
1438 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001439 edesc = aead_edesc_alloc(req, req->iv, 0, true);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001440 if (IS_ERR(edesc))
1441 return PTR_ERR(edesc);
1442
1443 /* set encrypt */
Lee Nipper70bcaca2008-07-03 19:08:46 +08001444 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001445
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001446 return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001447}
1448
Lee Nipper56af8cd2009-03-29 15:50:50 +08001449static int aead_decrypt(struct aead_request *req)
Kim Phillips9c4a7962008-06-23 19:50:15 +08001450{
1451 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
Herbert Xuaeb4c132015-07-30 17:53:22 +08001452 unsigned int authsize = crypto_aead_authsize(authenc);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001453 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001454 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
Lee Nipper56af8cd2009-03-29 15:50:50 +08001455 struct talitos_edesc *edesc;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001456 void *icvdata;
1457
Kim Phillips9c4a7962008-06-23 19:50:15 +08001458 /* allocate extended descriptor */
Horia Geanta62293a32013-11-28 15:11:17 +02001459 edesc = aead_edesc_alloc(req, req->iv, 1, false);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001460 if (IS_ERR(edesc))
1461 return PTR_ERR(edesc);
1462
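	/*
	 * Added note: the hardware ICV check below can only be used for
	 * IPSEC_ESP-type descriptors on parts with TALITOS_FTR_HW_AUTH_CHECK,
	 * and only when no link tables are needed or the h/w includes the
	 * extent in the link table length; otherwise the incoming ICV is
	 * stashed and verified in software after decryption.
	 */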
Christophe Leroy4bbfb832019-05-21 13:34:15 +00001463 if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1464 (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
Kim Phillipse938e462009-03-29 15:53:23 +08001465 ((!edesc->src_nents && !edesc->dst_nents) ||
1466 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
Kim Phillips9c4a7962008-06-23 19:50:15 +08001467
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001468 /* decrypt and check the ICV */
Kim Phillipse938e462009-03-29 15:53:23 +08001469 edesc->desc.hdr = ctx->desc_hdr_template |
1470 DESC_HDR_DIR_INBOUND |
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001471 DESC_HDR_MODE1_MDEU_CICV;
Kim Phillips9c4a7962008-06-23 19:50:15 +08001472
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001473 /* reset integrity check result bits */
Kim Phillips9c4a7962008-06-23 19:50:15 +08001474
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001475 return ipsec_esp(edesc, req, false,
1476 ipsec_esp_decrypt_hwauth_done);
Kim Phillipsfe5720e2008-10-12 20:33:14 +08001477 }
Kim Phillipse938e462009-03-29 15:53:23 +08001478
1479 /* Have to check the ICV with software */
1480 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1481
1482	/* stash incoming ICV for later comparison with ICV generated by the h/w */
Christophe Leroye3451772019-05-21 13:34:19 +00001483 icvdata = edesc->buf + edesc->dma_len;
Kim Phillipse938e462009-03-29 15:53:23 +08001484
Christophe Leroyeae55a52019-05-21 13:34:17 +00001485 sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1486 req->assoclen + req->cryptlen - authsize);
Kim Phillipse938e462009-03-29 15:53:23 +08001487
Christophe Leroy7ede4c32019-05-21 13:34:14 +00001488 return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
Kim Phillips9c4a7962008-06-23 19:50:15 +08001489}
1490
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001491static int skcipher_setkey(struct crypto_skcipher *cipher,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001492 const u8 *key, unsigned int keylen)
1493{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001494 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
LEROY Christophe2e13ce02017-10-06 15:05:02 +02001495 struct device *dev = ctx->dev;
LEROY Christophef384cdc2017-10-06 15:04:37 +02001496
LEROY Christophe2e13ce02017-10-06 15:05:02 +02001497 if (ctx->keylen)
1498 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1499
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001500 memcpy(&ctx->key, key, keylen);
1501 ctx->keylen = keylen;
1502
LEROY Christophe2e13ce02017-10-06 15:05:02 +02001503 ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1504
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001505 return 0;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001506}
1507
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001508static int skcipher_des_setkey(struct crypto_skcipher *cipher,
Herbert Xuef7c5c82019-04-11 16:51:21 +08001509 const u8 *key, unsigned int keylen)
1510{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001511 return verify_skcipher_des_key(cipher, key) ?:
1512 skcipher_setkey(cipher, key, keylen);
Herbert Xuef7c5c82019-04-11 16:51:21 +08001513}
1514
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001515static int skcipher_des3_setkey(struct crypto_skcipher *cipher,
Herbert Xuef7c5c82019-04-11 16:51:21 +08001516 const u8 *key, unsigned int keylen)
1517{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001518 return verify_skcipher_des3_key(cipher, key) ?:
1519 skcipher_setkey(cipher, key, keylen);
Herbert Xuef7c5c82019-04-11 16:51:21 +08001520}
1521
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001522static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
Christophe Leroy1ba34e72019-05-21 13:34:10 +00001523 const u8 *key, unsigned int keylen)
1524{
1525 if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1526 keylen == AES_KEYSIZE_256)
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001527 return skcipher_setkey(cipher, key, keylen);
Christophe Leroy1ba34e72019-05-21 13:34:10 +00001528
Christophe Leroy1ba34e72019-05-21 13:34:10 +00001529 return -EINVAL;
1530}
1531
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001532static void common_nonsnoop_unmap(struct device *dev,
1533 struct talitos_edesc *edesc,
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001534 struct skcipher_request *areq)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001535{
1536 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
LEROY Christophe032d1972015-04-17 16:31:51 +02001537
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001538 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001539 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1540
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001541 if (edesc->dma_len)
1542 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1543 DMA_BIDIRECTIONAL);
1544}
1545
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001546static void skcipher_done(struct device *dev,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001547 struct talitos_desc *desc, void *context,
1548 int err)
1549{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001550 struct skcipher_request *areq = context;
1551 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1552 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1553 unsigned int ivsize = crypto_skcipher_ivsize(cipher);
Kim Phillips19bbbc62009-03-29 15:53:59 +08001554 struct talitos_edesc *edesc;
1555
1556 edesc = container_of(desc, struct talitos_edesc, desc);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001557
1558 common_nonsnoop_unmap(dev, edesc, areq);
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001559 memcpy(areq->iv, ctx->iv, ivsize);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001560
1561 kfree(edesc);
1562
1563 areq->base.complete(&areq->base, err);
1564}
1565
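/*
 * Added note on the skcipher descriptor built by common_nonsnoop() below,
 * as read from the code: ptr[0] and ptr[6] are left empty, ptr[1] = IV in,
 * ptr[2] = cipher key, ptr[3] = data in, ptr[4] = data out, ptr[5] = IV out.
 */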
1566static int common_nonsnoop(struct talitos_edesc *edesc,
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001567 struct skcipher_request *areq,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001568 void (*callback) (struct device *dev,
1569 struct talitos_desc *desc,
1570 void *context, int error))
1571{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001572 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1573 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001574 struct device *dev = ctx->dev;
1575 struct talitos_desc *desc = &edesc->desc;
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001576 unsigned int cryptlen = areq->cryptlen;
1577 unsigned int ivsize = crypto_skcipher_ivsize(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001578 int sg_count, ret;
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001579 bool sync_needed = false;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001580 struct talitos_private *priv = dev_get_drvdata(dev);
1581 bool is_sec1 = has_ftr_sec1(priv);
Christophe Leroy416b8462021-01-20 18:57:24 +00001582 bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
1583 (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;
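	/*
	 * Added note: is_ctr selects a 16-byte alignment for the link table
	 * chunks mapped below, presumably so that each chunk stays a multiple
	 * of the AES block size, which AES-CTR on this hardware appears to
	 * require; other modes request no extra alignment.
	 */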
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001584
1585 /* first DWORD empty */
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001586
1587 /* cipher iv */
LEROY Christopheda9de142017-10-06 15:04:57 +02001588 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001589
1590 /* cipher key */
LEROY Christophe2e13ce02017-10-06 15:05:02 +02001591 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001592
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001593 sg_count = edesc->src_nents ?: 1;
1594 if (is_sec1 && sg_count > 1)
1595 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1596 cryptlen);
1597 else
1598 sg_count = dma_map_sg(dev, areq->src, sg_count,
1599 (areq->src == areq->dst) ?
1600 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001601 /*
1602 * cipher in
1603 */
Christophe Leroy416b8462021-01-20 18:57:24 +00001604 sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
1605 sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001606 if (sg_count > 1)
1607 sync_needed = true;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001608
1609 /* cipher out */
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001610 if (areq->src != areq->dst) {
1611 sg_count = edesc->dst_nents ? : 1;
1612 if (!is_sec1 || sg_count == 1)
1613 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1614 }
1615
1616 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1617 sg_count, 0, (edesc->src_nents + 1));
1618 if (ret > 1)
1619 sync_needed = true;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001620
1621 /* iv out */
LEROY Christophea2b35aa2015-04-17 16:31:57 +02001622 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001623 DMA_FROM_DEVICE);
1624
1625 /* last DWORD empty */
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001626
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001627 if (sync_needed)
1628 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1629 edesc->dma_len, DMA_BIDIRECTIONAL);
1630
Kim Phillips5228f0f2011-07-15 11:21:38 +08001631 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001632 if (ret != -EINPROGRESS) {
1633 common_nonsnoop_unmap(dev, edesc, areq);
1634 kfree(edesc);
1635 }
1636 return ret;
1637}
1638
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001639static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request *
Horia Geanta62293a32013-11-28 15:11:17 +02001640 areq, bool encrypt)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001641{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001642 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1643 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1644 unsigned int ivsize = crypto_skcipher_ivsize(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001645
Herbert Xuaeb4c132015-07-30 17:53:22 +08001646 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001647 areq->iv, 0, areq->cryptlen, 0, ivsize, 0,
Horia Geanta62293a32013-11-28 15:11:17 +02001648 areq->base.flags, encrypt);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001649}
1650
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001651static int skcipher_encrypt(struct skcipher_request *areq)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001652{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001653 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1654 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001655 struct talitos_edesc *edesc;
Christophe Leroyee483d32019-05-21 13:34:12 +00001656 unsigned int blocksize =
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001657 crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
Christophe Leroyee483d32019-05-21 13:34:12 +00001658
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001659 if (!areq->cryptlen)
Christophe Leroyee483d32019-05-21 13:34:12 +00001660 return 0;
1661
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001662 if (areq->cryptlen % blocksize)
Christophe Leroyee483d32019-05-21 13:34:12 +00001663 return -EINVAL;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001664
1665 /* allocate extended descriptor */
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001666 edesc = skcipher_edesc_alloc(areq, true);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001667 if (IS_ERR(edesc))
1668 return PTR_ERR(edesc);
1669
1670 /* set encrypt */
1671 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1672
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001673 return common_nonsnoop(edesc, areq, skcipher_done);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001674}
1675
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001676static int skcipher_decrypt(struct skcipher_request *areq)
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001677{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001678 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1679 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001680 struct talitos_edesc *edesc;
Christophe Leroyee483d32019-05-21 13:34:12 +00001681 unsigned int blocksize =
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001682 crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
Christophe Leroyee483d32019-05-21 13:34:12 +00001683
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001684 if (!areq->cryptlen)
Christophe Leroyee483d32019-05-21 13:34:12 +00001685 return 0;
1686
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001687 if (areq->cryptlen % blocksize)
Christophe Leroyee483d32019-05-21 13:34:12 +00001688 return -EINVAL;
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001689
1690 /* allocate extended descriptor */
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001691 edesc = skcipher_edesc_alloc(areq, false);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001692 if (IS_ERR(edesc))
1693 return PTR_ERR(edesc);
1694
1695 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1696
Ard Biesheuvel373960d2019-11-09 18:09:49 +01001697 return common_nonsnoop(edesc, areq, skcipher_done);
Lee Nipper4de9d0b2009-03-29 15:52:32 +08001698}
1699
Lee Nipper497f2e62010-05-19 19:20:36 +10001700static void common_nonsnoop_hash_unmap(struct device *dev,
1701 struct talitos_edesc *edesc,
1702 struct ahash_request *areq)
1703{
1704 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
Christophe Leroy7a6eda52019-09-10 06:04:14 +00001705 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
LEROY Christophead4cd512018-02-26 17:40:04 +01001706 struct talitos_private *priv = dev_get_drvdata(dev);
1707 bool is_sec1 = has_ftr_sec1(priv);
1708 struct talitos_desc *desc = &edesc->desc;
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001709 struct talitos_desc *desc2 = (struct talitos_desc *)
1710 (edesc->buf + edesc->dma_len);
LEROY Christophead4cd512018-02-26 17:40:04 +01001711
1712 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1713 if (desc->next_desc &&
1714 desc->ptr[5].ptr != desc2->ptr[5].ptr)
1715 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
Christophe Leroy7a6eda52019-09-10 06:04:14 +00001716 if (req_ctx->last)
1717 memcpy(areq->result, req_ctx->hw_context,
1718 crypto_ahash_digestsize(tfm));
Lee Nipper497f2e62010-05-19 19:20:36 +10001719
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001720 if (req_ctx->psrc)
1721 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
LEROY Christophe032d1972015-04-17 16:31:51 +02001722
LEROY Christophead4cd512018-02-26 17:40:04 +01001723 /* When using hashctx-in, must unmap it. */
1724 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1725 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1726 DMA_TO_DEVICE);
1727 else if (desc->next_desc)
1728 unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1729 DMA_TO_DEVICE);
1730
1731 if (is_sec1 && req_ctx->nbuf)
1732 unmap_single_talitos_ptr(dev, &desc->ptr[3],
1733 DMA_TO_DEVICE);
1734
Lee Nipper497f2e62010-05-19 19:20:36 +10001735 if (edesc->dma_len)
1736 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1737 DMA_BIDIRECTIONAL);
1738
LEROY Christophe37b5e882017-10-06 15:05:06 +02001739 if (edesc->desc.next_desc)
1740 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1741 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
Lee Nipper497f2e62010-05-19 19:20:36 +10001742}
1743
1744static void ahash_done(struct device *dev,
1745 struct talitos_desc *desc, void *context,
1746 int err)
1747{
1748 struct ahash_request *areq = context;
1749 struct talitos_edesc *edesc =
1750 container_of(desc, struct talitos_edesc, desc);
1751 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1752
1753 if (!req_ctx->last && req_ctx->to_hash_later) {
1754 /* Position any partial block for next update/final/finup */
LEROY Christophe3c0dd192017-10-06 15:05:08 +02001755 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
Lee Nipper5e833bc2010-06-16 15:29:15 +10001756 req_ctx->nbuf = req_ctx->to_hash_later;
Lee Nipper497f2e62010-05-19 19:20:36 +10001757 }
1758 common_nonsnoop_hash_unmap(dev, edesc, areq);
1759
1760 kfree(edesc);
1761
1762 areq->base.complete(&areq->base, err);
1763}
1764
LEROY Christophe2d029052015-04-17 16:32:18 +02001765/*
1766 * SEC1 doesn't like hashing a 0-sized message, so we do the padding
1767 * ourselves and submit a padded block
1768 */
LEROY Christophe5b2cf262017-10-06 15:04:47 +02001769static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
LEROY Christophe2d029052015-04-17 16:32:18 +02001770 struct talitos_edesc *edesc,
1771 struct talitos_ptr *ptr)
1772{
1773 static u8 padded_hash[64] = {
1774 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1775 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1776 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1777 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1778 };
1779
1780 pr_err_once("Bug in SEC1, padding ourself\n");
1781 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1782 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1783 (char *)padded_hash, DMA_TO_DEVICE);
1784}
1785
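/*
 * Added note on the ahash descriptor built below, as read from the code:
 * ptr[0] and ptr[4] are empty, ptr[1] = hash context in (except on the very
 * first op when the h/w initializes the context), ptr[2] = HMAC key if any,
 * ptr[3] = data in, ptr[5] = digest or context out. On SEC1, when both
 * previously buffered bytes and new scatterlist data must be hashed, a
 * second descriptor is chained via desc->next_desc.
 */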
Lee Nipper497f2e62010-05-19 19:20:36 +10001786static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1787 struct ahash_request *areq, unsigned int length,
1788 void (*callback) (struct device *dev,
1789 struct talitos_desc *desc,
1790 void *context, int error))
1791{
1792 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1793 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1794 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1795 struct device *dev = ctx->dev;
1796 struct talitos_desc *desc = &edesc->desc;
LEROY Christophe032d1972015-04-17 16:31:51 +02001797 int ret;
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001798 bool sync_needed = false;
LEROY Christophe922f9dc2015-04-17 16:32:07 +02001799 struct talitos_private *priv = dev_get_drvdata(dev);
1800 bool is_sec1 = has_ftr_sec1(priv);
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001801 int sg_count;
Lee Nipper497f2e62010-05-19 19:20:36 +10001802
1803 /* first DWORD empty */
Lee Nipper497f2e62010-05-19 19:20:36 +10001804
Kim Phillips60f208d2010-05-19 19:21:53 +10001805 /* hash context in */
1806 if (!req_ctx->first || req_ctx->swinit) {
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001807 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1808 req_ctx->hw_context_size,
1809 req_ctx->hw_context,
1810 DMA_TO_DEVICE);
Kim Phillips60f208d2010-05-19 19:21:53 +10001811 req_ctx->swinit = 0;
Lee Nipper497f2e62010-05-19 19:20:36 +10001812 }
LEROY Christopheafd62fa2017-09-13 12:44:51 +02001813 /* Indicate next op is not the first. */
1814 req_ctx->first = 0;
Lee Nipper497f2e62010-05-19 19:20:36 +10001815
1816 /* HMAC key */
1817 if (ctx->keylen)
LEROY Christophe2e13ce02017-10-06 15:05:02 +02001818 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1819 is_sec1);
Lee Nipper497f2e62010-05-19 19:20:36 +10001820
LEROY Christophe37b5e882017-10-06 15:05:06 +02001821 if (is_sec1 && req_ctx->nbuf)
1822 length -= req_ctx->nbuf;
1823
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001824 sg_count = edesc->src_nents ?: 1;
1825 if (is_sec1 && sg_count > 1)
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001826 sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001827 else if (length)
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001828 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1829 DMA_TO_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001830 /*
1831 * data in
1832 */
LEROY Christophe37b5e882017-10-06 15:05:06 +02001833 if (is_sec1 && req_ctx->nbuf) {
LEROY Christophead4cd512018-02-26 17:40:04 +01001834 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1835 req_ctx->buf[req_ctx->buf_idx],
1836 DMA_TO_DEVICE);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001837 } else {
1838 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001839 &desc->ptr[3], sg_count, 0, 0);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001840 if (sg_count > 1)
1841 sync_needed = true;
1842 }
Lee Nipper497f2e62010-05-19 19:20:36 +10001843
1844 /* fifth DWORD empty */
Lee Nipper497f2e62010-05-19 19:20:36 +10001845
1846 /* hash/HMAC out -or- hash context out */
1847 if (req_ctx->last)
1848 map_single_talitos_ptr(dev, &desc->ptr[5],
1849 crypto_ahash_digestsize(tfm),
Christophe Leroy7a6eda52019-09-10 06:04:14 +00001850 req_ctx->hw_context, DMA_FROM_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001851 else
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001852 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1853 req_ctx->hw_context_size,
1854 req_ctx->hw_context,
1855 DMA_FROM_DEVICE);
Lee Nipper497f2e62010-05-19 19:20:36 +10001856
1857 /* last DWORD empty */
Lee Nipper497f2e62010-05-19 19:20:36 +10001858
LEROY Christophe2d029052015-04-17 16:32:18 +02001859 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1860 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1861
LEROY Christophe37b5e882017-10-06 15:05:06 +02001862 if (is_sec1 && req_ctx->nbuf && length) {
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001863 struct talitos_desc *desc2 = (struct talitos_desc *)
1864 (edesc->buf + edesc->dma_len);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001865 dma_addr_t next_desc;
1866
1867 memset(desc2, 0, sizeof(*desc2));
1868 desc2->hdr = desc->hdr;
1869 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1870 desc2->hdr1 = desc2->hdr;
1871 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1872 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1873 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1874
LEROY Christophead4cd512018-02-26 17:40:04 +01001875 if (desc->ptr[1].ptr)
1876 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1877 is_sec1);
1878 else
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001879 map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1880 req_ctx->hw_context_size,
1881 req_ctx->hw_context,
1882 DMA_TO_DEVICE);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001883 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1884 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
Christophe Leroy58cdbc62019-06-24 07:20:16 +00001885 &desc2->ptr[3], sg_count, 0, 0);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001886 if (sg_count > 1)
1887 sync_needed = true;
1888 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1889 if (req_ctx->last)
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001890 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1891 req_ctx->hw_context_size,
1892 req_ctx->hw_context,
1893 DMA_FROM_DEVICE);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001894
1895 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1896 DMA_BIDIRECTIONAL);
1897 desc->next_desc = cpu_to_be32(next_desc);
1898 }
1899
LEROY Christophe6a1e8d12016-06-06 13:20:38 +02001900 if (sync_needed)
1901 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1902 edesc->dma_len, DMA_BIDIRECTIONAL);
1903
Kim Phillips5228f0f2011-07-15 11:21:38 +08001904 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10001905 if (ret != -EINPROGRESS) {
1906 common_nonsnoop_hash_unmap(dev, edesc, areq);
1907 kfree(edesc);
1908 }
1909 return ret;
1910}
1911
1912static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1913 unsigned int nbytes)
1914{
1915 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1916 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1917 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
LEROY Christophe37b5e882017-10-06 15:05:06 +02001918 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1919 bool is_sec1 = has_ftr_sec1(priv);
1920
1921 if (is_sec1)
1922 nbytes -= req_ctx->nbuf;
Lee Nipper497f2e62010-05-19 19:20:36 +10001923
Herbert Xuaeb4c132015-07-30 17:53:22 +08001924 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
Horia Geanta62293a32013-11-28 15:11:17 +02001925 nbytes, 0, 0, 0, areq->base.flags, false);
Lee Nipper497f2e62010-05-19 19:20:36 +10001926}
1927
1928static int ahash_init(struct ahash_request *areq)
1929{
1930 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001931 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1932 struct device *dev = ctx->dev;
Lee Nipper497f2e62010-05-19 19:20:36 +10001933 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
LEROY Christophe49f97832017-10-06 15:05:04 +02001934 unsigned int size;
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001935 dma_addr_t dma;
Lee Nipper497f2e62010-05-19 19:20:36 +10001936
1937 /* Initialize the context */
LEROY Christophe3c0dd192017-10-06 15:05:08 +02001938 req_ctx->buf_idx = 0;
Lee Nipper5e833bc2010-06-16 15:29:15 +10001939 req_ctx->nbuf = 0;
Kim Phillips60f208d2010-05-19 19:21:53 +10001940 req_ctx->first = 1; /* first indicates h/w must init its context */
1941 req_ctx->swinit = 0; /* assume h/w init of context */
LEROY Christophe49f97832017-10-06 15:05:04 +02001942 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
Lee Nipper497f2e62010-05-19 19:20:36 +10001943 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1944 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
LEROY Christophe49f97832017-10-06 15:05:04 +02001945 req_ctx->hw_context_size = size;
Lee Nipper497f2e62010-05-19 19:20:36 +10001946
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001947 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1948 DMA_TO_DEVICE);
1949 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1950
Lee Nipper497f2e62010-05-19 19:20:36 +10001951 return 0;
1952}
1953
Kim Phillips60f208d2010-05-19 19:21:53 +10001954/*
1955 * on h/w without explicit sha224 support, we initialize h/w context
1956 * manually with sha224 constants, and tell it to run sha256.
1957 */
1958static int ahash_init_sha224_swinit(struct ahash_request *areq)
1959{
1960 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1961
Kim Phillipsa7524472010-09-23 15:56:38 +08001962 req_ctx->hw_context[0] = SHA224_H0;
1963 req_ctx->hw_context[1] = SHA224_H1;
1964 req_ctx->hw_context[2] = SHA224_H2;
1965 req_ctx->hw_context[3] = SHA224_H3;
1966 req_ctx->hw_context[4] = SHA224_H4;
1967 req_ctx->hw_context[5] = SHA224_H5;
1968 req_ctx->hw_context[6] = SHA224_H6;
1969 req_ctx->hw_context[7] = SHA224_H7;
Kim Phillips60f208d2010-05-19 19:21:53 +10001970
1971 /* init 64-bit count */
1972 req_ctx->hw_context[8] = 0;
1973 req_ctx->hw_context[9] = 0;
1974
LEROY Christophe6a4967c2018-02-26 17:40:06 +01001975 ahash_init(areq);
1976	req_ctx->swinit = 1; /* prevent h/w from initializing context with sha256 values */
1977
Kim Phillips60f208d2010-05-19 19:21:53 +10001978 return 0;
1979}
1980
Lee Nipper497f2e62010-05-19 19:20:36 +10001981static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1982{
1983 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1984 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1985 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1986 struct talitos_edesc *edesc;
1987 unsigned int blocksize =
1988 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1989 unsigned int nbytes_to_hash;
1990 unsigned int to_hash_later;
Lee Nipper5e833bc2010-06-16 15:29:15 +10001991 unsigned int nsg;
LABBE Corentin8e409fe2015-11-04 21:13:34 +01001992 int nents;
LEROY Christophe37b5e882017-10-06 15:05:06 +02001993 struct device *dev = ctx->dev;
1994 struct talitos_private *priv = dev_get_drvdata(dev);
1995 bool is_sec1 = has_ftr_sec1(priv);
LEROY Christophe3c0dd192017-10-06 15:05:08 +02001996 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
Lee Nipper497f2e62010-05-19 19:20:36 +10001997
Lee Nipper5e833bc2010-06-16 15:29:15 +10001998 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1999 /* Buffer up to one whole block */
LABBE Corentin8e409fe2015-11-04 21:13:34 +01002000 nents = sg_nents_for_len(areq->src, nbytes);
2001 if (nents < 0) {
2002 dev_err(ctx->dev, "Invalid number of src SG.\n");
2003 return nents;
2004 }
2005 sg_copy_to_buffer(areq->src, nents,
LEROY Christophe3c0dd192017-10-06 15:05:08 +02002006 ctx_buf + req_ctx->nbuf, nbytes);
Lee Nipper5e833bc2010-06-16 15:29:15 +10002007 req_ctx->nbuf += nbytes;
Lee Nipper497f2e62010-05-19 19:20:36 +10002008 return 0;
2009 }
2010
Lee Nipper5e833bc2010-06-16 15:29:15 +10002011 /* At least (blocksize + 1) bytes are available to hash */
2012 nbytes_to_hash = nbytes + req_ctx->nbuf;
2013 to_hash_later = nbytes_to_hash & (blocksize - 1);
2014
2015 if (req_ctx->last)
2016 to_hash_later = 0;
2017 else if (to_hash_later)
2018 /* There is a partial block. Hash the full block(s) now */
2019 nbytes_to_hash -= to_hash_later;
2020 else {
2021 /* Keep one block buffered */
2022 nbytes_to_hash -= blocksize;
2023 to_hash_later = blocksize;
2024 }
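	/*
	 * Added example (assuming a 64-byte block size): with 10 bytes already
	 * buffered and a 100-byte update, nbytes_to_hash is 110 and
	 * to_hash_later is 46, so 64 bytes are hashed now and 46 are buffered;
	 * if the total were an exact multiple such as 128, one full block of
	 * 64 bytes would be kept buffered for the final padding pass.
	 */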
2025
2026 /* Chain in any previously buffered data */
LEROY Christophe37b5e882017-10-06 15:05:06 +02002027 if (!is_sec1 && req_ctx->nbuf) {
Lee Nipper5e833bc2010-06-16 15:29:15 +10002028 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2029 sg_init_table(req_ctx->bufsl, nsg);
LEROY Christophe3c0dd192017-10-06 15:05:08 +02002030 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
Lee Nipper5e833bc2010-06-16 15:29:15 +10002031 if (nsg > 1)
Dan Williamsc56f6d12015-08-07 18:15:13 +02002032 sg_chain(req_ctx->bufsl, 2, areq->src);
Lee Nipper497f2e62010-05-19 19:20:36 +10002033 req_ctx->psrc = req_ctx->bufsl;
LEROY Christophe37b5e882017-10-06 15:05:06 +02002034 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
Christophe Leroy58cdbc62019-06-24 07:20:16 +00002035 int offset;
2036
LEROY Christophe37b5e882017-10-06 15:05:06 +02002037 if (nbytes_to_hash > blocksize)
2038 offset = blocksize - req_ctx->nbuf;
2039 else
2040 offset = nbytes_to_hash - req_ctx->nbuf;
2041 nents = sg_nents_for_len(areq->src, offset);
2042 if (nents < 0) {
2043 dev_err(ctx->dev, "Invalid number of src SG.\n");
2044 return nents;
2045 }
2046 sg_copy_to_buffer(areq->src, nents,
LEROY Christophe3c0dd192017-10-06 15:05:08 +02002047 ctx_buf + req_ctx->nbuf, offset);
LEROY Christophe37b5e882017-10-06 15:05:06 +02002048 req_ctx->nbuf += offset;
Christophe Leroy58cdbc62019-06-24 07:20:16 +00002049 req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2050 offset);
Lee Nipper5e833bc2010-06-16 15:29:15 +10002051 } else
Lee Nipper497f2e62010-05-19 19:20:36 +10002052 req_ctx->psrc = areq->src;
Lee Nipper497f2e62010-05-19 19:20:36 +10002053
Lee Nipper5e833bc2010-06-16 15:29:15 +10002054 if (to_hash_later) {
LABBE Corentin8e409fe2015-11-04 21:13:34 +01002055 nents = sg_nents_for_len(areq->src, nbytes);
2056 if (nents < 0) {
2057 dev_err(ctx->dev, "Invalid number of src SG.\n");
2058 return nents;
2059 }
Akinobu Mitad0525722013-07-08 16:01:55 -07002060 sg_pcopy_to_buffer(areq->src, nents,
LEROY Christophe3c0dd192017-10-06 15:05:08 +02002061 req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
Lee Nipper5e833bc2010-06-16 15:29:15 +10002062 to_hash_later,
2063 nbytes - to_hash_later);
Lee Nipper497f2e62010-05-19 19:20:36 +10002064 }
Lee Nipper5e833bc2010-06-16 15:29:15 +10002065 req_ctx->to_hash_later = to_hash_later;
Lee Nipper497f2e62010-05-19 19:20:36 +10002066
Lee Nipper5e833bc2010-06-16 15:29:15 +10002067 /* Allocate extended descriptor */
Lee Nipper497f2e62010-05-19 19:20:36 +10002068 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2069 if (IS_ERR(edesc))
2070 return PTR_ERR(edesc);
2071
2072 edesc->desc.hdr = ctx->desc_hdr_template;
2073
2074 /* On last one, request SEC to pad; otherwise continue */
2075 if (req_ctx->last)
2076 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2077 else
2078 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2079
Kim Phillips60f208d2010-05-19 19:21:53 +10002080 /* request SEC to INIT hash. */
2081 if (req_ctx->first && !req_ctx->swinit)
Lee Nipper497f2e62010-05-19 19:20:36 +10002082 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2083
2084 /* When the tfm context has a keylen, it's an HMAC.
2085	 * A first or last (i.e. not middle) descriptor must request HMAC.
2086 */
2087 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2088 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2089
Christophe Leroy58cdbc62019-06-24 07:20:16 +00002090 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
Lee Nipper497f2e62010-05-19 19:20:36 +10002091}
2092
2093static int ahash_update(struct ahash_request *areq)
2094{
2095 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2096
2097 req_ctx->last = 0;
2098
2099 return ahash_process_req(areq, areq->nbytes);
2100}
2101
2102static int ahash_final(struct ahash_request *areq)
2103{
2104 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2105
2106 req_ctx->last = 1;
2107
2108 return ahash_process_req(areq, 0);
2109}
2110
2111static int ahash_finup(struct ahash_request *areq)
2112{
2113 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2114
2115 req_ctx->last = 1;
2116
2117 return ahash_process_req(areq, areq->nbytes);
2118}
2119
2120static int ahash_digest(struct ahash_request *areq)
2121{
2122 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
Kim Phillips60f208d2010-05-19 19:21:53 +10002123 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10002124
Kim Phillips60f208d2010-05-19 19:21:53 +10002125 ahash->init(areq);
Lee Nipper497f2e62010-05-19 19:20:36 +10002126 req_ctx->last = 1;
2127
2128 return ahash_process_req(areq, areq->nbytes);
2129}
2130
Horia Geantă3639ca82016-04-21 19:24:55 +03002131static int ahash_export(struct ahash_request *areq, void *out)
2132{
2133 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2134 struct talitos_export_state *export = out;
LEROY Christophe6a4967c2018-02-26 17:40:06 +01002135 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2136 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2137 struct device *dev = ctx->dev;
2138 dma_addr_t dma;
2139
2140 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2141 DMA_FROM_DEVICE);
2142 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
Horia Geantă3639ca82016-04-21 19:24:55 +03002143
2144 memcpy(export->hw_context, req_ctx->hw_context,
2145 req_ctx->hw_context_size);
LEROY Christophe3c0dd192017-10-06 15:05:08 +02002146 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
Horia Geantă3639ca82016-04-21 19:24:55 +03002147	export->swinit = req_ctx->swinit;
2148 export->first = req_ctx->first;
2149 export->last = req_ctx->last;
2150 export->to_hash_later = req_ctx->to_hash_later;
2151 export->nbuf = req_ctx->nbuf;
2152
2153 return 0;
2154}
2155
2156static int ahash_import(struct ahash_request *areq, const void *in)
2157{
2158 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2159 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
LEROY Christophe6a4967c2018-02-26 17:40:06 +01002160 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2161 struct device *dev = ctx->dev;
Horia Geantă3639ca82016-04-21 19:24:55 +03002162	const struct talitos_export_state *export = in;
LEROY Christophe49f97832017-10-06 15:05:04 +02002163 unsigned int size;
LEROY Christophe6a4967c2018-02-26 17:40:06 +01002164 dma_addr_t dma;
Horia Geantă3639ca82016-04-21 19:24:55 +03002165
2166 memset(req_ctx, 0, sizeof(*req_ctx));
LEROY Christophe49f97832017-10-06 15:05:04 +02002167 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
Horia Geantă3639ca82016-04-21 19:24:55 +03002168		? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2169 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
LEROY Christophe49f97832017-10-06 15:05:04 +02002170 req_ctx->hw_context_size = size;
LEROY Christophe49f97832017-10-06 15:05:04 +02002171 memcpy(req_ctx->hw_context, export->hw_context, size);
LEROY Christophe3c0dd192017-10-06 15:05:08 +02002172 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
Horia Geantă3639ca82016-04-21 19:24:55 +03002173	req_ctx->swinit = export->swinit;
2174 req_ctx->first = export->first;
2175 req_ctx->last = export->last;
2176 req_ctx->to_hash_later = export->to_hash_later;
2177 req_ctx->nbuf = export->nbuf;
2178
LEROY Christophe6a4967c2018-02-26 17:40:06 +01002179 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2180 DMA_TO_DEVICE);
2181 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2182
Horia Geantă3639ca82016-04-21 19:24:55 +03002183	return 0;
2184}
2185
Lee Nipper79b3a412011-11-21 16:13:25 +08002186static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2187 u8 *hash)
2188{
2189 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2190
2191 struct scatterlist sg[1];
2192 struct ahash_request *req;
Gilad Ben-Yosseff1c90ac32017-10-18 08:00:49 +01002193 struct crypto_wait wait;
Lee Nipper79b3a412011-11-21 16:13:25 +08002194 int ret;
2195
Gilad Ben-Yosseff1c90ac32017-10-18 08:00:49 +01002196 crypto_init_wait(&wait);
Lee Nipper79b3a412011-11-21 16:13:25 +08002197
2198 req = ahash_request_alloc(tfm, GFP_KERNEL);
2199 if (!req)
2200 return -ENOMEM;
2201
2202 /* Keep tfm keylen == 0 during hash of the long key */
2203 ctx->keylen = 0;
2204 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yosseff1c90ac32017-10-18 08:00:49 +01002205 crypto_req_done, &wait);
Lee Nipper79b3a412011-11-21 16:13:25 +08002206
2207 sg_init_one(&sg[0], key, keylen);
2208
2209 ahash_request_set_crypt(req, sg, hash, keylen);
Gilad Ben-Yosseff1c90ac32017-10-18 08:00:49 +01002210 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2211
Lee Nipper79b3a412011-11-21 16:13:25 +08002212 ahash_request_free(req);
2213
2214 return ret;
2215}
2216
2217static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2218 unsigned int keylen)
2219{
2220 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
LEROY Christophe2e13ce02017-10-06 15:05:02 +02002221 struct device *dev = ctx->dev;
Lee Nipper79b3a412011-11-21 16:13:25 +08002222 unsigned int blocksize =
2223 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2224 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2225 unsigned int keysize = keylen;
2226 u8 hash[SHA512_DIGEST_SIZE];
2227 int ret;
2228
2229 if (keylen <= blocksize)
2230 memcpy(ctx->key, key, keysize);
2231 else {
2232 /* Must get the hash of the long key */
2233 ret = keyhash(tfm, key, keylen, hash);
2234
Eric Biggers674f3682019-12-30 21:19:36 -06002235 if (ret)
Lee Nipper79b3a412011-11-21 16:13:25 +08002236 return -EINVAL;
Lee Nipper79b3a412011-11-21 16:13:25 +08002237
2238 keysize = digestsize;
2239 memcpy(ctx->key, hash, digestsize);
2240 }
2241
LEROY Christophe2e13ce02017-10-06 15:05:02 +02002242 if (ctx->keylen)
2243 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2244
Lee Nipper79b3a412011-11-21 16:13:25 +08002245 ctx->keylen = keysize;
LEROY Christophe2e13ce02017-10-06 15:05:02 +02002246 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
Lee Nipper79b3a412011-11-21 16:13:25 +08002247
2248 return 0;
2249}
2250
2251
Kim Phillips9c4a7962008-06-23 19:50:15 +08002252struct talitos_alg_template {
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002253 u32 type;
LEROY Christopheb0057762016-06-06 13:20:44 +02002254 u32 priority;
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002255 union {
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002256 struct skcipher_alg skcipher;
Lee Nipperacbf7c622010-05-19 19:19:33 +10002257 struct ahash_alg hash;
Herbert Xuaeb4c132015-07-30 17:53:22 +08002258 struct aead_alg aead;
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002259 } alg;
Kim Phillips9c4a7962008-06-23 19:50:15 +08002260 __be32 desc_hdr_template;
2261};
2262
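/*
 * Added note: each entry's desc_hdr_template composes the descriptor header
 * from a descriptor type plus execution unit selections and modes, e.g. the
 * AESU in CBC mode as the primary unit with the MDEU performing an init/pad
 * HMAC-SHA1 for the authenc() algorithms below.
 */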
2263static struct talitos_alg_template driver_algs[] = {
Horia Geanta991155b2013-03-20 16:31:38 +02002264 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002265 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002266 .alg.aead = {
2267 .base = {
2268 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2269 .cra_driver_name = "authenc-hmac-sha1-"
2270 "cbc-aes-talitos",
2271 .cra_blocksize = AES_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002272 .cra_flags = CRYPTO_ALG_ASYNC |
2273 CRYPTO_ALG_ALLOCATES_MEMORY,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002274 },
2275 .ivsize = AES_BLOCK_SIZE,
2276 .maxauthsize = SHA1_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002277 },
Kim Phillips9c4a7962008-06-23 19:50:15 +08002278 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2279 DESC_HDR_SEL0_AESU |
2280 DESC_HDR_MODE0_AESU_CBC |
2281 DESC_HDR_SEL1_MDEUA |
2282 DESC_HDR_MODE1_MDEU_INIT |
2283 DESC_HDR_MODE1_MDEU_PAD |
2284 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
Lee Nipper70bcaca2008-07-03 19:08:46 +08002285 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002286 { .type = CRYPTO_ALG_TYPE_AEAD,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002287 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2288 .alg.aead = {
2289 .base = {
2290 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2291 .cra_driver_name = "authenc-hmac-sha1-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002292 "cbc-aes-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002293 .cra_blocksize = AES_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002294 .cra_flags = CRYPTO_ALG_ASYNC |
2295 CRYPTO_ALG_ALLOCATES_MEMORY,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002296 },
2297 .ivsize = AES_BLOCK_SIZE,
2298 .maxauthsize = SHA1_DIGEST_SIZE,
2299 },
2300 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2301 DESC_HDR_SEL0_AESU |
2302 DESC_HDR_MODE0_AESU_CBC |
2303 DESC_HDR_SEL1_MDEUA |
2304 DESC_HDR_MODE1_MDEU_INIT |
2305 DESC_HDR_MODE1_MDEU_PAD |
2306 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2307 },
2308 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002309 .alg.aead = {
2310 .base = {
2311 .cra_name = "authenc(hmac(sha1),"
2312 "cbc(des3_ede))",
2313 .cra_driver_name = "authenc-hmac-sha1-"
2314 "cbc-3des-talitos",
2315 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002316 .cra_flags = CRYPTO_ALG_ASYNC |
2317 CRYPTO_ALG_ALLOCATES_MEMORY,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002318 },
2319 .ivsize = DES3_EDE_BLOCK_SIZE,
2320 .maxauthsize = SHA1_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002321 .setkey = aead_des3_setkey,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002322 },
Lee Nipper70bcaca2008-07-03 19:08:46 +08002323 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2324 DESC_HDR_SEL0_DEU |
2325 DESC_HDR_MODE0_DEU_CBC |
2326 DESC_HDR_MODE0_DEU_3DES |
2327 DESC_HDR_SEL1_MDEUA |
2328 DESC_HDR_MODE1_MDEU_INIT |
2329 DESC_HDR_MODE1_MDEU_PAD |
2330 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
Lee Nipper3952f172008-07-10 18:29:18 +08002331 },
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002332 { .type = CRYPTO_ALG_TYPE_AEAD,
2333 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2334 .alg.aead = {
2335 .base = {
2336 .cra_name = "authenc(hmac(sha1),"
2337 "cbc(des3_ede))",
2338 .cra_driver_name = "authenc-hmac-sha1-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002339 "cbc-3des-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002340 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002341 .cra_flags = CRYPTO_ALG_ASYNC |
2342 CRYPTO_ALG_ALLOCATES_MEMORY,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002343 },
2344 .ivsize = DES3_EDE_BLOCK_SIZE,
2345 .maxauthsize = SHA1_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002346 .setkey = aead_des3_setkey,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002347 },
2348 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2349 DESC_HDR_SEL0_DEU |
2350 DESC_HDR_MODE0_DEU_CBC |
2351 DESC_HDR_MODE0_DEU_3DES |
2352 DESC_HDR_SEL1_MDEUA |
2353 DESC_HDR_MODE1_MDEU_INIT |
2354 DESC_HDR_MODE1_MDEU_PAD |
2355 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2356 },
Horia Geanta357fb602012-07-03 19:16:53 +03002357 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002358 .alg.aead = {
2359 .base = {
2360 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2361 .cra_driver_name = "authenc-hmac-sha224-"
2362 "cbc-aes-talitos",
2363 .cra_blocksize = AES_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002364 .cra_flags = CRYPTO_ALG_ASYNC |
2365 CRYPTO_ALG_ALLOCATES_MEMORY,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002366 },
2367 .ivsize = AES_BLOCK_SIZE,
2368 .maxauthsize = SHA224_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002369 },
2370 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2371 DESC_HDR_SEL0_AESU |
2372 DESC_HDR_MODE0_AESU_CBC |
2373 DESC_HDR_SEL1_MDEUA |
2374 DESC_HDR_MODE1_MDEU_INIT |
2375 DESC_HDR_MODE1_MDEU_PAD |
2376 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2377 },
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002378 { .type = CRYPTO_ALG_TYPE_AEAD,
2379 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2380 .alg.aead = {
2381 .base = {
2382 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2383 .cra_driver_name = "authenc-hmac-sha224-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002384 "cbc-aes-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002385 .cra_blocksize = AES_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002386 .cra_flags = CRYPTO_ALG_ASYNC |
2387 CRYPTO_ALG_ALLOCATES_MEMORY,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002388 },
2389 .ivsize = AES_BLOCK_SIZE,
2390 .maxauthsize = SHA224_DIGEST_SIZE,
2391 },
2392 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2393 DESC_HDR_SEL0_AESU |
2394 DESC_HDR_MODE0_AESU_CBC |
2395 DESC_HDR_SEL1_MDEUA |
2396 DESC_HDR_MODE1_MDEU_INIT |
2397 DESC_HDR_MODE1_MDEU_PAD |
2398 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2399 },
Horia Geanta357fb602012-07-03 19:16:53 +03002400 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002401 .alg.aead = {
2402 .base = {
2403 .cra_name = "authenc(hmac(sha224),"
2404 "cbc(des3_ede))",
2405 .cra_driver_name = "authenc-hmac-sha224-"
2406 "cbc-3des-talitos",
2407 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002408 .cra_flags = CRYPTO_ALG_ASYNC |
2409 CRYPTO_ALG_ALLOCATES_MEMORY,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002410 },
2411 .ivsize = DES3_EDE_BLOCK_SIZE,
2412 .maxauthsize = SHA224_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002413 .setkey = aead_des3_setkey,
Horia Geanta357fb602012-07-03 19:16:53 +03002414 },
2415 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2416 DESC_HDR_SEL0_DEU |
2417 DESC_HDR_MODE0_DEU_CBC |
2418 DESC_HDR_MODE0_DEU_3DES |
2419 DESC_HDR_SEL1_MDEUA |
2420 DESC_HDR_MODE1_MDEU_INIT |
2421 DESC_HDR_MODE1_MDEU_PAD |
2422 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2423 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002424 { .type = CRYPTO_ALG_TYPE_AEAD,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002425 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2426 .alg.aead = {
2427 .base = {
2428 .cra_name = "authenc(hmac(sha224),"
2429 "cbc(des3_ede))",
2430 .cra_driver_name = "authenc-hmac-sha224-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002431 "cbc-3des-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002432 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002433 .cra_flags = CRYPTO_ALG_ASYNC |
2434 CRYPTO_ALG_ALLOCATES_MEMORY,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002435 },
2436 .ivsize = DES3_EDE_BLOCK_SIZE,
2437 .maxauthsize = SHA224_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002438 .setkey = aead_des3_setkey,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002439 },
2440 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2441 DESC_HDR_SEL0_DEU |
2442 DESC_HDR_MODE0_DEU_CBC |
2443 DESC_HDR_MODE0_DEU_3DES |
2444 DESC_HDR_SEL1_MDEUA |
2445 DESC_HDR_MODE1_MDEU_INIT |
2446 DESC_HDR_MODE1_MDEU_PAD |
2447 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2448 },
2449 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002450 .alg.aead = {
2451 .base = {
2452 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2453 .cra_driver_name = "authenc-hmac-sha256-"
2454 "cbc-aes-talitos",
2455 .cra_blocksize = AES_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002456 .cra_flags = CRYPTO_ALG_ASYNC |
2457 CRYPTO_ALG_ALLOCATES_MEMORY,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002458 },
2459 .ivsize = AES_BLOCK_SIZE,
2460 .maxauthsize = SHA256_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002461 },
Lee Nipper3952f172008-07-10 18:29:18 +08002462 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2463 DESC_HDR_SEL0_AESU |
2464 DESC_HDR_MODE0_AESU_CBC |
2465 DESC_HDR_SEL1_MDEUA |
2466 DESC_HDR_MODE1_MDEU_INIT |
2467 DESC_HDR_MODE1_MDEU_PAD |
2468 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2469 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002470 { .type = CRYPTO_ALG_TYPE_AEAD,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002471 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2472 .alg.aead = {
2473 .base = {
2474 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2475 .cra_driver_name = "authenc-hmac-sha256-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002476 "cbc-aes-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002477 .cra_blocksize = AES_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002478 .cra_flags = CRYPTO_ALG_ASYNC |
2479 CRYPTO_ALG_ALLOCATES_MEMORY,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002480 },
2481 .ivsize = AES_BLOCK_SIZE,
2482 .maxauthsize = SHA256_DIGEST_SIZE,
2483 },
2484 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2485 DESC_HDR_SEL0_AESU |
2486 DESC_HDR_MODE0_AESU_CBC |
2487 DESC_HDR_SEL1_MDEUA |
2488 DESC_HDR_MODE1_MDEU_INIT |
2489 DESC_HDR_MODE1_MDEU_PAD |
2490 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2491 },
2492 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002493 .alg.aead = {
2494 .base = {
2495 .cra_name = "authenc(hmac(sha256),"
2496 "cbc(des3_ede))",
2497 .cra_driver_name = "authenc-hmac-sha256-"
2498 "cbc-3des-talitos",
2499 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002500 .cra_flags = CRYPTO_ALG_ASYNC |
2501 CRYPTO_ALG_ALLOCATES_MEMORY,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002502 },
2503 .ivsize = DES3_EDE_BLOCK_SIZE,
2504 .maxauthsize = SHA256_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002505 .setkey = aead_des3_setkey,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002506 },
Lee Nipper3952f172008-07-10 18:29:18 +08002507 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2508 DESC_HDR_SEL0_DEU |
2509 DESC_HDR_MODE0_DEU_CBC |
2510 DESC_HDR_MODE0_DEU_3DES |
2511 DESC_HDR_SEL1_MDEUA |
2512 DESC_HDR_MODE1_MDEU_INIT |
2513 DESC_HDR_MODE1_MDEU_PAD |
2514 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2515 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002516 { .type = CRYPTO_ALG_TYPE_AEAD,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002517 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2518 .alg.aead = {
2519 .base = {
2520 .cra_name = "authenc(hmac(sha256),"
2521 "cbc(des3_ede))",
2522 .cra_driver_name = "authenc-hmac-sha256-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002523 "cbc-3des-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002524 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002525 .cra_flags = CRYPTO_ALG_ASYNC |
2526 CRYPTO_ALG_ALLOCATES_MEMORY,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002527 },
2528 .ivsize = DES3_EDE_BLOCK_SIZE,
2529 .maxauthsize = SHA256_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002530 .setkey = aead_des3_setkey,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002531 },
2532 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2533 DESC_HDR_SEL0_DEU |
2534 DESC_HDR_MODE0_DEU_CBC |
2535 DESC_HDR_MODE0_DEU_3DES |
2536 DESC_HDR_SEL1_MDEUA |
2537 DESC_HDR_MODE1_MDEU_INIT |
2538 DESC_HDR_MODE1_MDEU_PAD |
2539 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2540 },
2541 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002542 .alg.aead = {
2543 .base = {
2544 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2545 .cra_driver_name = "authenc-hmac-sha384-"
2546 "cbc-aes-talitos",
2547 .cra_blocksize = AES_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002548 .cra_flags = CRYPTO_ALG_ASYNC |
2549 CRYPTO_ALG_ALLOCATES_MEMORY,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002550 },
2551 .ivsize = AES_BLOCK_SIZE,
2552 .maxauthsize = SHA384_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002553 },
2554 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2555 DESC_HDR_SEL0_AESU |
2556 DESC_HDR_MODE0_AESU_CBC |
2557 DESC_HDR_SEL1_MDEUB |
2558 DESC_HDR_MODE1_MDEU_INIT |
2559 DESC_HDR_MODE1_MDEU_PAD |
2560 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2561 },
2562 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002563 .alg.aead = {
2564 .base = {
2565 .cra_name = "authenc(hmac(sha384),"
2566 "cbc(des3_ede))",
2567 .cra_driver_name = "authenc-hmac-sha384-"
2568 "cbc-3des-talitos",
2569 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002570 .cra_flags = CRYPTO_ALG_ASYNC |
2571 CRYPTO_ALG_ALLOCATES_MEMORY,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002572 },
2573 .ivsize = DES3_EDE_BLOCK_SIZE,
2574 .maxauthsize = SHA384_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002575 .setkey = aead_des3_setkey,
Horia Geanta357fb602012-07-03 19:16:53 +03002576 },
2577 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2578 DESC_HDR_SEL0_DEU |
2579 DESC_HDR_MODE0_DEU_CBC |
2580 DESC_HDR_MODE0_DEU_3DES |
2581 DESC_HDR_SEL1_MDEUB |
2582 DESC_HDR_MODE1_MDEU_INIT |
2583 DESC_HDR_MODE1_MDEU_PAD |
2584 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2585 },
2586 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002587 .alg.aead = {
2588 .base = {
2589 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2590 .cra_driver_name = "authenc-hmac-sha512-"
2591 "cbc-aes-talitos",
2592 .cra_blocksize = AES_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002593 .cra_flags = CRYPTO_ALG_ASYNC |
2594 CRYPTO_ALG_ALLOCATES_MEMORY,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002595 },
2596 .ivsize = AES_BLOCK_SIZE,
2597 .maxauthsize = SHA512_DIGEST_SIZE,
Horia Geanta357fb602012-07-03 19:16:53 +03002598 },
2599 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2600 DESC_HDR_SEL0_AESU |
2601 DESC_HDR_MODE0_AESU_CBC |
2602 DESC_HDR_SEL1_MDEUB |
2603 DESC_HDR_MODE1_MDEU_INIT |
2604 DESC_HDR_MODE1_MDEU_PAD |
2605 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2606 },
2607 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002608 .alg.aead = {
2609 .base = {
2610 .cra_name = "authenc(hmac(sha512),"
2611 "cbc(des3_ede))",
2612 .cra_driver_name = "authenc-hmac-sha512-"
2613 "cbc-3des-talitos",
2614 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002615 .cra_flags = CRYPTO_ALG_ASYNC |
2616 CRYPTO_ALG_ALLOCATES_MEMORY,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002617 },
2618 .ivsize = DES3_EDE_BLOCK_SIZE,
2619 .maxauthsize = SHA512_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002620 .setkey = aead_des3_setkey,
Horia Geanta357fb602012-07-03 19:16:53 +03002621 },
2622 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2623 DESC_HDR_SEL0_DEU |
2624 DESC_HDR_MODE0_DEU_CBC |
2625 DESC_HDR_MODE0_DEU_3DES |
2626 DESC_HDR_SEL1_MDEUB |
2627 DESC_HDR_MODE1_MDEU_INIT |
2628 DESC_HDR_MODE1_MDEU_PAD |
2629 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2630 },
2631 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002632 .alg.aead = {
2633 .base = {
2634 .cra_name = "authenc(hmac(md5),cbc(aes))",
2635 .cra_driver_name = "authenc-hmac-md5-"
2636 "cbc-aes-talitos",
2637 .cra_blocksize = AES_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002638 .cra_flags = CRYPTO_ALG_ASYNC |
2639 CRYPTO_ALG_ALLOCATES_MEMORY,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002640 },
2641 .ivsize = AES_BLOCK_SIZE,
2642 .maxauthsize = MD5_DIGEST_SIZE,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002643 },
Lee Nipper3952f172008-07-10 18:29:18 +08002644 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2645 DESC_HDR_SEL0_AESU |
2646 DESC_HDR_MODE0_AESU_CBC |
2647 DESC_HDR_SEL1_MDEUA |
2648 DESC_HDR_MODE1_MDEU_INIT |
2649 DESC_HDR_MODE1_MDEU_PAD |
2650 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2651 },
Lee Nipperd5e4aae2010-05-19 19:18:38 +10002652 { .type = CRYPTO_ALG_TYPE_AEAD,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002653 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2654 .alg.aead = {
2655 .base = {
2656 .cra_name = "authenc(hmac(md5),cbc(aes))",
2657 .cra_driver_name = "authenc-hmac-md5-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002658 "cbc-aes-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002659 .cra_blocksize = AES_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002660 .cra_flags = CRYPTO_ALG_ASYNC |
2661 CRYPTO_ALG_ALLOCATES_MEMORY,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002662 },
2663 .ivsize = AES_BLOCK_SIZE,
2664 .maxauthsize = MD5_DIGEST_SIZE,
2665 },
2666 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2667 DESC_HDR_SEL0_AESU |
2668 DESC_HDR_MODE0_AESU_CBC |
2669 DESC_HDR_SEL1_MDEUA |
2670 DESC_HDR_MODE1_MDEU_INIT |
2671 DESC_HDR_MODE1_MDEU_PAD |
2672 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2673 },
2674 { .type = CRYPTO_ALG_TYPE_AEAD,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002675 .alg.aead = {
2676 .base = {
2677 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2678 .cra_driver_name = "authenc-hmac-md5-"
2679 "cbc-3des-talitos",
2680 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002681 .cra_flags = CRYPTO_ALG_ASYNC |
2682 CRYPTO_ALG_ALLOCATES_MEMORY,
Herbert Xuaeb4c132015-07-30 17:53:22 +08002683 },
2684 .ivsize = DES3_EDE_BLOCK_SIZE,
2685 .maxauthsize = MD5_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002686 .setkey = aead_des3_setkey,
Lee Nipper56af8cd2009-03-29 15:50:50 +08002687 },
Lee Nipper3952f172008-07-10 18:29:18 +08002688 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2689 DESC_HDR_SEL0_DEU |
2690 DESC_HDR_MODE0_DEU_CBC |
2691 DESC_HDR_MODE0_DEU_3DES |
2692 DESC_HDR_SEL1_MDEUA |
2693 DESC_HDR_MODE1_MDEU_INIT |
2694 DESC_HDR_MODE1_MDEU_PAD |
2695 DESC_HDR_MODE1_MDEU_MD5_HMAC,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002696 },
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002697 { .type = CRYPTO_ALG_TYPE_AEAD,
2698 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2699 .alg.aead = {
2700 .base = {
2701 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2702 .cra_driver_name = "authenc-hmac-md5-"
Christophe Leroya1a42f82019-05-21 13:34:08 +00002703 "cbc-3des-talitos-hsna",
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002704 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002705 .cra_flags = CRYPTO_ALG_ASYNC |
2706 CRYPTO_ALG_ALLOCATES_MEMORY,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002707 },
2708 .ivsize = DES3_EDE_BLOCK_SIZE,
2709 .maxauthsize = MD5_DIGEST_SIZE,
Herbert Xuef7c5c82019-04-11 16:51:21 +08002710 .setkey = aead_des3_setkey,
LEROY Christophe7405c8d2016-06-06 13:20:46 +02002711 },
2712 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2713 DESC_HDR_SEL0_DEU |
2714 DESC_HDR_MODE0_DEU_CBC |
2715 DESC_HDR_MODE0_DEU_3DES |
2716 DESC_HDR_SEL1_MDEUA |
2717 DESC_HDR_MODE1_MDEU_INIT |
2718 DESC_HDR_MODE1_MDEU_PAD |
2719 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2720 },
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002721 /* SKCIPHER algorithms. */
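	/*
	 * For these entries, talitos_alg_alloc() supplies the common
	 * encrypt/decrypt callbacks and falls back to skcipher_setkey when a
	 * template does not provide its own setkey.
	 */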
2722 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2723 .alg.skcipher = {
2724 .base.cra_name = "ecb(aes)",
2725 .base.cra_driver_name = "ecb-aes-talitos",
2726 .base.cra_blocksize = AES_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002727 .base.cra_flags = CRYPTO_ALG_ASYNC |
2728 CRYPTO_ALG_ALLOCATES_MEMORY,
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002729 .min_keysize = AES_MIN_KEY_SIZE,
2730 .max_keysize = AES_MAX_KEY_SIZE,
2731 .setkey = skcipher_aes_setkey,
LEROY Christophe5e75ae12015-12-01 12:44:15 +01002732 },
2733 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2734 DESC_HDR_SEL0_AESU,
2735 },
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002736 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2737 .alg.skcipher = {
2738 .base.cra_name = "cbc(aes)",
2739 .base.cra_driver_name = "cbc-aes-talitos",
2740 .base.cra_blocksize = AES_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002741 .base.cra_flags = CRYPTO_ALG_ASYNC |
2742 CRYPTO_ALG_ALLOCATES_MEMORY,
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002743 .min_keysize = AES_MIN_KEY_SIZE,
2744 .max_keysize = AES_MAX_KEY_SIZE,
2745 .ivsize = AES_BLOCK_SIZE,
2746 .setkey = skcipher_aes_setkey,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002747 },
2748 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2749 DESC_HDR_SEL0_AESU |
2750 DESC_HDR_MODE0_AESU_CBC,
2751 },
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002752 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2753 .alg.skcipher = {
2754 .base.cra_name = "ctr(aes)",
2755 .base.cra_driver_name = "ctr-aes-talitos",
2756 .base.cra_blocksize = 1,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002757 .base.cra_flags = CRYPTO_ALG_ASYNC |
2758 CRYPTO_ALG_ALLOCATES_MEMORY,
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002759 .min_keysize = AES_MIN_KEY_SIZE,
2760 .max_keysize = AES_MAX_KEY_SIZE,
2761 .ivsize = AES_BLOCK_SIZE,
2762 .setkey = skcipher_aes_setkey,
LEROY Christophe5e75ae12015-12-01 12:44:15 +01002763 },
LEROY Christophe70d355c2017-10-06 15:04:43 +02002764 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
LEROY Christophe5e75ae12015-12-01 12:44:15 +01002765 DESC_HDR_SEL0_AESU |
2766 DESC_HDR_MODE0_AESU_CTR,
2767 },
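	/*
	 * A second "ctr(aes)" template follows, using the common non-snoop
	 * descriptor type instead of AESU_CTR_NONSNOOP. talitos_alg_alloc()
	 * and hw_supports() between them keep only the variant suited to the
	 * hardware, so only one of the two should end up registered.
	 */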
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002768 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2769 .alg.skcipher = {
Christophe Leroy43a942d2021-01-20 18:57:25 +00002770 .base.cra_name = "ctr(aes)",
2771 .base.cra_driver_name = "ctr-aes-talitos",
2772 .base.cra_blocksize = 1,
2773 .base.cra_flags = CRYPTO_ALG_ASYNC |
2774 CRYPTO_ALG_ALLOCATES_MEMORY,
2775 .min_keysize = AES_MIN_KEY_SIZE,
2776 .max_keysize = AES_MAX_KEY_SIZE,
2777 .ivsize = AES_BLOCK_SIZE,
2778 .setkey = skcipher_aes_setkey,
2779 },
2780 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2781 DESC_HDR_SEL0_AESU |
2782 DESC_HDR_MODE0_AESU_CTR,
2783 },
2784 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2785 .alg.skcipher = {
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002786 .base.cra_name = "ecb(des)",
2787 .base.cra_driver_name = "ecb-des-talitos",
2788 .base.cra_blocksize = DES_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002789 .base.cra_flags = CRYPTO_ALG_ASYNC |
2790 CRYPTO_ALG_ALLOCATES_MEMORY,
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002791 .min_keysize = DES_KEY_SIZE,
2792 .max_keysize = DES_KEY_SIZE,
2793 .setkey = skcipher_des_setkey,
LEROY Christophe5e75ae12015-12-01 12:44:15 +01002794 },
2795 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2796 DESC_HDR_SEL0_DEU,
2797 },
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002798 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2799 .alg.skcipher = {
2800 .base.cra_name = "cbc(des)",
2801 .base.cra_driver_name = "cbc-des-talitos",
2802 .base.cra_blocksize = DES_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002803 .base.cra_flags = CRYPTO_ALG_ASYNC |
2804 CRYPTO_ALG_ALLOCATES_MEMORY,
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002805 .min_keysize = DES_KEY_SIZE,
2806 .max_keysize = DES_KEY_SIZE,
2807 .ivsize = DES_BLOCK_SIZE,
2808 .setkey = skcipher_des_setkey,
LEROY Christophe5e75ae12015-12-01 12:44:15 +01002809 },
2810 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2811 DESC_HDR_SEL0_DEU |
2812 DESC_HDR_MODE0_DEU_CBC,
2813 },
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002814 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2815 .alg.skcipher = {
2816 .base.cra_name = "ecb(des3_ede)",
2817 .base.cra_driver_name = "ecb-3des-talitos",
2818 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002819 .base.cra_flags = CRYPTO_ALG_ASYNC |
2820 CRYPTO_ALG_ALLOCATES_MEMORY,
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002821 .min_keysize = DES3_EDE_KEY_SIZE,
2822 .max_keysize = DES3_EDE_KEY_SIZE,
2823 .setkey = skcipher_des3_setkey,
LEROY Christophe5e75ae12015-12-01 12:44:15 +01002824 },
2825 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2826 DESC_HDR_SEL0_DEU |
2827 DESC_HDR_MODE0_DEU_3DES,
2828 },
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002829 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2830 .alg.skcipher = {
2831 .base.cra_name = "cbc(des3_ede)",
2832 .base.cra_driver_name = "cbc-3des-talitos",
2833 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002834 .base.cra_flags = CRYPTO_ALG_ASYNC |
2835 CRYPTO_ALG_ALLOCATES_MEMORY,
Ard Biesheuvel373960d2019-11-09 18:09:49 +01002836 .min_keysize = DES3_EDE_KEY_SIZE,
2837 .max_keysize = DES3_EDE_KEY_SIZE,
2838 .ivsize = DES3_EDE_BLOCK_SIZE,
2839 .setkey = skcipher_des3_setkey,
Lee Nipper4de9d0b2009-03-29 15:52:32 +08002840 },
2841 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2842 DESC_HDR_SEL0_DEU |
2843 DESC_HDR_MODE0_DEU_CBC |
2844 DESC_HDR_MODE0_DEU_3DES,
Lee Nipper497f2e62010-05-19 19:20:36 +10002845 },
2846 /* AHASH algorithms. */
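	/*
	 * Hash templates rely on talitos_alg_alloc() to fill in the common
	 * init/update/final/finup/digest/export/import callbacks; only the
	 * "hmac(...)" variants are given a setkey handler.
	 */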
2847 { .type = CRYPTO_ALG_TYPE_AHASH,
2848 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002849 .halg.digestsize = MD5_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002850 .halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper497f2e62010-05-19 19:20:36 +10002851 .halg.base = {
2852 .cra_name = "md5",
2853 .cra_driver_name = "md5-talitos",
Martin Hicksb3988612015-03-03 08:21:34 -05002854 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002855 .cra_flags = CRYPTO_ALG_ASYNC |
2856 CRYPTO_ALG_ALLOCATES_MEMORY,
Lee Nipper497f2e62010-05-19 19:20:36 +10002857 }
2858 },
2859 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2860 DESC_HDR_SEL0_MDEUA |
2861 DESC_HDR_MODE0_MDEU_MD5,
2862 },
2863 { .type = CRYPTO_ALG_TYPE_AHASH,
2864 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002865 .halg.digestsize = SHA1_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002866 .halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper497f2e62010-05-19 19:20:36 +10002867 .halg.base = {
2868 .cra_name = "sha1",
2869 .cra_driver_name = "sha1-talitos",
2870 .cra_blocksize = SHA1_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002871 .cra_flags = CRYPTO_ALG_ASYNC |
2872 CRYPTO_ALG_ALLOCATES_MEMORY,
Lee Nipper497f2e62010-05-19 19:20:36 +10002873 }
2874 },
2875 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2876 DESC_HDR_SEL0_MDEUA |
2877 DESC_HDR_MODE0_MDEU_SHA1,
2878 },
2879 { .type = CRYPTO_ALG_TYPE_AHASH,
2880 .alg.hash = {
Kim Phillips60f208d2010-05-19 19:21:53 +10002881 .halg.digestsize = SHA224_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002882 .halg.statesize = sizeof(struct talitos_export_state),
Kim Phillips60f208d2010-05-19 19:21:53 +10002883 .halg.base = {
2884 .cra_name = "sha224",
2885 .cra_driver_name = "sha224-talitos",
2886 .cra_blocksize = SHA224_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002887 .cra_flags = CRYPTO_ALG_ASYNC |
2888 CRYPTO_ALG_ALLOCATES_MEMORY,
Kim Phillips60f208d2010-05-19 19:21:53 +10002889 }
2890 },
2891 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2892 DESC_HDR_SEL0_MDEUA |
2893 DESC_HDR_MODE0_MDEU_SHA224,
2894 },
2895 { .type = CRYPTO_ALG_TYPE_AHASH,
2896 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002897 .halg.digestsize = SHA256_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002898 .halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper497f2e62010-05-19 19:20:36 +10002899 .halg.base = {
2900 .cra_name = "sha256",
2901 .cra_driver_name = "sha256-talitos",
2902 .cra_blocksize = SHA256_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002903 .cra_flags = CRYPTO_ALG_ASYNC |
2904 CRYPTO_ALG_ALLOCATES_MEMORY,
Lee Nipper497f2e62010-05-19 19:20:36 +10002905 }
2906 },
2907 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2908 DESC_HDR_SEL0_MDEUA |
2909 DESC_HDR_MODE0_MDEU_SHA256,
2910 },
2911 { .type = CRYPTO_ALG_TYPE_AHASH,
2912 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002913 .halg.digestsize = SHA384_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002914 .halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper497f2e62010-05-19 19:20:36 +10002915 .halg.base = {
2916 .cra_name = "sha384",
2917 .cra_driver_name = "sha384-talitos",
2918 .cra_blocksize = SHA384_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002919 .cra_flags = CRYPTO_ALG_ASYNC |
2920 CRYPTO_ALG_ALLOCATES_MEMORY,
Lee Nipper497f2e62010-05-19 19:20:36 +10002921 }
2922 },
2923 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2924 DESC_HDR_SEL0_MDEUB |
2925 DESC_HDR_MODE0_MDEUB_SHA384,
2926 },
2927 { .type = CRYPTO_ALG_TYPE_AHASH,
2928 .alg.hash = {
Lee Nipper497f2e62010-05-19 19:20:36 +10002929 .halg.digestsize = SHA512_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002930 .halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper497f2e62010-05-19 19:20:36 +10002931 .halg.base = {
2932 .cra_name = "sha512",
2933 .cra_driver_name = "sha512-talitos",
2934 .cra_blocksize = SHA512_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002935 .cra_flags = CRYPTO_ALG_ASYNC |
2936 CRYPTO_ALG_ALLOCATES_MEMORY,
Lee Nipper497f2e62010-05-19 19:20:36 +10002937 }
2938 },
2939 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2940 DESC_HDR_SEL0_MDEUB |
2941 DESC_HDR_MODE0_MDEUB_SHA512,
2942 },
Lee Nipper79b3a412011-11-21 16:13:25 +08002943 { .type = CRYPTO_ALG_TYPE_AHASH,
2944 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002945 .halg.digestsize = MD5_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002946 .halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper79b3a412011-11-21 16:13:25 +08002947 .halg.base = {
2948 .cra_name = "hmac(md5)",
2949 .cra_driver_name = "hmac-md5-talitos",
Martin Hicksb3988612015-03-03 08:21:34 -05002950 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002951 .cra_flags = CRYPTO_ALG_ASYNC |
2952 CRYPTO_ALG_ALLOCATES_MEMORY,
Lee Nipper79b3a412011-11-21 16:13:25 +08002953 }
2954 },
2955 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2956 DESC_HDR_SEL0_MDEUA |
2957 DESC_HDR_MODE0_MDEU_MD5,
2958 },
2959 { .type = CRYPTO_ALG_TYPE_AHASH,
2960 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002961 .halg.digestsize = SHA1_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002962 .halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper79b3a412011-11-21 16:13:25 +08002963 .halg.base = {
2964 .cra_name = "hmac(sha1)",
2965 .cra_driver_name = "hmac-sha1-talitos",
2966 .cra_blocksize = SHA1_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002967 .cra_flags = CRYPTO_ALG_ASYNC |
2968 CRYPTO_ALG_ALLOCATES_MEMORY,
Lee Nipper79b3a412011-11-21 16:13:25 +08002969 }
2970 },
2971 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2972 DESC_HDR_SEL0_MDEUA |
2973 DESC_HDR_MODE0_MDEU_SHA1,
2974 },
2975 { .type = CRYPTO_ALG_TYPE_AHASH,
2976 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002977 .halg.digestsize = SHA224_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002978 .halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper79b3a412011-11-21 16:13:25 +08002979 .halg.base = {
2980 .cra_name = "hmac(sha224)",
2981 .cra_driver_name = "hmac-sha224-talitos",
2982 .cra_blocksize = SHA224_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002983 .cra_flags = CRYPTO_ALG_ASYNC |
2984 CRYPTO_ALG_ALLOCATES_MEMORY,
Lee Nipper79b3a412011-11-21 16:13:25 +08002985 }
2986 },
2987 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2988 DESC_HDR_SEL0_MDEUA |
2989 DESC_HDR_MODE0_MDEU_SHA224,
2990 },
2991 { .type = CRYPTO_ALG_TYPE_AHASH,
2992 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08002993 .halg.digestsize = SHA256_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03002994 .halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper79b3a412011-11-21 16:13:25 +08002995 .halg.base = {
2996 .cra_name = "hmac(sha256)",
2997 .cra_driver_name = "hmac-sha256-talitos",
2998 .cra_blocksize = SHA256_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07002999 .cra_flags = CRYPTO_ALG_ASYNC |
3000 CRYPTO_ALG_ALLOCATES_MEMORY,
Lee Nipper79b3a412011-11-21 16:13:25 +08003001 }
3002 },
3003 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3004 DESC_HDR_SEL0_MDEUA |
3005 DESC_HDR_MODE0_MDEU_SHA256,
3006 },
3007 { .type = CRYPTO_ALG_TYPE_AHASH,
3008 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08003009 .halg.digestsize = SHA384_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03003010 .halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper79b3a412011-11-21 16:13:25 +08003011 .halg.base = {
3012 .cra_name = "hmac(sha384)",
3013 .cra_driver_name = "hmac-sha384-talitos",
3014 .cra_blocksize = SHA384_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07003015 .cra_flags = CRYPTO_ALG_ASYNC |
3016 CRYPTO_ALG_ALLOCATES_MEMORY,
Lee Nipper79b3a412011-11-21 16:13:25 +08003017 }
3018 },
3019 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3020 DESC_HDR_SEL0_MDEUB |
3021 DESC_HDR_MODE0_MDEUB_SHA384,
3022 },
3023 { .type = CRYPTO_ALG_TYPE_AHASH,
3024 .alg.hash = {
Lee Nipper79b3a412011-11-21 16:13:25 +08003025 .halg.digestsize = SHA512_DIGEST_SIZE,
Horia Geantă3639ca82016-04-21 19:24:55 +03003026 .halg.statesize = sizeof(struct talitos_export_state),
Lee Nipper79b3a412011-11-21 16:13:25 +08003027 .halg.base = {
3028 .cra_name = "hmac(sha512)",
3029 .cra_driver_name = "hmac-sha512-talitos",
3030 .cra_blocksize = SHA512_BLOCK_SIZE,
Mikulas Patockab8aa7dc2020-07-09 23:20:41 -07003031 .cra_flags = CRYPTO_ALG_ASYNC |
3032 CRYPTO_ALG_ALLOCATES_MEMORY,
Lee Nipper79b3a412011-11-21 16:13:25 +08003033 }
3034 },
3035 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3036 DESC_HDR_SEL0_MDEUB |
3037 DESC_HDR_MODE0_MDEUB_SHA512,
3038 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08003039};
3040
3041struct talitos_crypto_alg {
3042 struct list_head entry;
3043 struct device *dev;
Lee Nipperacbf7c622010-05-19 19:19:33 +10003044 struct talitos_alg_template algt;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003045};
3046
Jonas Eymann89d124c2016-04-19 20:33:47 +03003047static int talitos_init_common(struct talitos_ctx *ctx,
3048 struct talitos_crypto_alg *talitos_alg)
Kim Phillips9c4a7962008-06-23 19:50:15 +08003049{
Kim Phillips5228f0f2011-07-15 11:21:38 +08003050 struct talitos_private *priv;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003051
3052 /* update context with ptr to dev */
3053 ctx->dev = talitos_alg->dev;
Kim Phillips19bbbc62009-03-29 15:53:59 +08003054
Kim Phillips5228f0f2011-07-15 11:21:38 +08003055 /* assign SEC channel to tfm in round-robin fashion */
3056 priv = dev_get_drvdata(ctx->dev);
3057 ctx->ch = atomic_inc_return(&priv->last_chan) &
3058 (priv->num_channels - 1);
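	/*
	 * priv->num_channels is validated as a power of two in talitos_probe(),
	 * so this mask cycles through all channels.
	 */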
3059
Kim Phillips9c4a7962008-06-23 19:50:15 +08003060 /* copy descriptor header template value */
Lee Nipperacbf7c622010-05-19 19:19:33 +10003061 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003062
Kim Phillips602dba52011-07-15 11:21:39 +08003063 /* select done notification */
3064 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3065
Lee Nipper497f2e62010-05-19 19:20:36 +10003066 return 0;
3067}
3068
Herbert Xuaeb4c132015-07-30 17:53:22 +08003069static int talitos_cra_init_aead(struct crypto_aead *tfm)
Lee Nipper497f2e62010-05-19 19:20:36 +10003070{
Jonas Eymann89d124c2016-04-19 20:33:47 +03003071 struct aead_alg *alg = crypto_aead_alg(tfm);
3072 struct talitos_crypto_alg *talitos_alg;
3073 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3074
3075 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3076 algt.alg.aead);
3077
3078 return talitos_init_common(ctx, talitos_alg);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003079}
3080
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003081static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm)
3082{
3083 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3084 struct talitos_crypto_alg *talitos_alg;
3085 struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm);
3086
3087 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3088 algt.alg.skcipher);
3089
3090 return talitos_init_common(ctx, talitos_alg);
3091}
3092
Lee Nipper497f2e62010-05-19 19:20:36 +10003093static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3094{
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003095 struct crypto_alg *alg = tfm->__crt_alg;
3096 struct talitos_crypto_alg *talitos_alg;
Lee Nipper497f2e62010-05-19 19:20:36 +10003097 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3098
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003099 talitos_alg = container_of(__crypto_ahash_alg(alg),
3100 struct talitos_crypto_alg,
3101 algt.alg.hash);
Lee Nipper497f2e62010-05-19 19:20:36 +10003102
3103 ctx->keylen = 0;
3104 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3105 sizeof(struct talitos_ahash_req_ctx));
3106
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003107 return talitos_init_common(ctx, talitos_alg);
Lee Nipper497f2e62010-05-19 19:20:36 +10003108}
3109
LEROY Christophe2e13ce02017-10-06 15:05:02 +02003110static void talitos_cra_exit(struct crypto_tfm *tfm)
3111{
3112 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3113 struct device *dev = ctx->dev;
3114
3115 if (ctx->keylen)
3116 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3117}
3118
Kim Phillips9c4a7962008-06-23 19:50:15 +08003119/*
 3120 * given the alg's descriptor header template, determine whether the
 3121 * descriptor type and the required primary/secondary execution units
 3122 * match the hw capabilities described in the device tree node.
3123 */
3124static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3125{
3126 struct talitos_private *priv = dev_get_drvdata(dev);
3127 int ret;
3128
3129 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3130 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3131
3132 if (SECONDARY_EU(desc_hdr_template))
3133 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3134 & priv->exec_units);
3135
3136 return ret;
3137}
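/*
 * Example: the "md5-talitos" template selects the common non-snoop descriptor
 * type and the MDEU-A execution unit, so it is only registered when the
 * corresponding bits are set in the node's fsl,descriptor-types-mask and
 * fsl,exec-units-mask properties.
 */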
3138
Grant Likely2dc11582010-08-06 09:25:50 -06003139static int talitos_remove(struct platform_device *ofdev)
Kim Phillips9c4a7962008-06-23 19:50:15 +08003140{
3141 struct device *dev = &ofdev->dev;
3142 struct talitos_private *priv = dev_get_drvdata(dev);
3143 struct talitos_crypto_alg *t_alg, *n;
3144 int i;
3145
3146 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
Lee Nipperacbf7c622010-05-19 19:19:33 +10003147 switch (t_alg->algt.type) {
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003148 case CRYPTO_ALG_TYPE_SKCIPHER:
3149 crypto_unregister_skcipher(&t_alg->algt.alg.skcipher);
Lee Nipperacbf7c622010-05-19 19:19:33 +10003150 break;
Herbert Xuaeb4c132015-07-30 17:53:22 +08003151 case CRYPTO_ALG_TYPE_AEAD:
3152 crypto_unregister_aead(&t_alg->algt.alg.aead);
Gustavo A. R. Silva5fc194e2019-09-09 00:29:52 -05003153 break;
Lee Nipperacbf7c622010-05-19 19:19:33 +10003154 case CRYPTO_ALG_TYPE_AHASH:
3155 crypto_unregister_ahash(&t_alg->algt.alg.hash);
3156 break;
3157 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08003158 list_del(&t_alg->entry);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003159 }
3160
3161 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3162 talitos_unregister_rng(dev);
3163
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003164 for (i = 0; i < 2; i++)
Kim Phillips2cdba3c2011-12-12 14:59:11 -06003165 if (priv->irq[i]) {
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003166 free_irq(priv->irq[i], dev);
3167 irq_dispose_mapping(priv->irq[i]);
3168 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08003169
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003170 tasklet_kill(&priv->done_task[0]);
Kim Phillips2cdba3c2011-12-12 14:59:11 -06003171 if (priv->irq[1])
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003172 tasklet_kill(&priv->done_task[1]);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003173
Kim Phillips9c4a7962008-06-23 19:50:15 +08003174 return 0;
3175}
3176
3177static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3178 struct talitos_alg_template
3179 *template)
3180{
Kim Phillips60f208d2010-05-19 19:21:53 +10003181 struct talitos_private *priv = dev_get_drvdata(dev);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003182 struct talitos_crypto_alg *t_alg;
3183 struct crypto_alg *alg;
3184
LEROY Christophe24b92ff2017-10-06 15:04:49 +02003185 t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3186 GFP_KERNEL);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003187 if (!t_alg)
3188 return ERR_PTR(-ENOMEM);
3189
Lee Nipperacbf7c622010-05-19 19:19:33 +10003190 t_alg->algt = *template;
3191
3192 switch (t_alg->algt.type) {
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003193 case CRYPTO_ALG_TYPE_SKCIPHER:
3194 alg = &t_alg->algt.alg.skcipher.base;
LEROY Christophe2e13ce02017-10-06 15:05:02 +02003195 alg->cra_exit = talitos_cra_exit;
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003196 t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher;
3197 t_alg->algt.alg.skcipher.setkey =
3198 t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
3199 t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
3200 t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
Christophe Leroy43a942d2021-01-20 18:57:25 +00003201 if (!strcmp(alg->cra_name, "ctr(aes)") && !has_ftr_sec1(priv) &&
3202 DESC_TYPE(t_alg->algt.desc_hdr_template) !=
3203 DESC_TYPE(DESC_HDR_TYPE_AESU_CTR_NONSNOOP)) {
3204 devm_kfree(dev, t_alg);
3205 return ERR_PTR(-ENOTSUPP);
3206 }
Lee Nipper497f2e62010-05-19 19:20:36 +10003207 break;
Lee Nipperacbf7c622010-05-19 19:19:33 +10003208 case CRYPTO_ALG_TYPE_AEAD:
Herbert Xuaeb4c132015-07-30 17:53:22 +08003209 alg = &t_alg->algt.alg.aead.base;
LEROY Christophe2e13ce02017-10-06 15:05:02 +02003210 alg->cra_exit = talitos_cra_exit;
Herbert Xuaeb4c132015-07-30 17:53:22 +08003211 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
Herbert Xuef7c5c82019-04-11 16:51:21 +08003212 t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3213 aead_setkey;
Herbert Xuaeb4c132015-07-30 17:53:22 +08003214 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3215 t_alg->algt.alg.aead.decrypt = aead_decrypt;
LEROY Christophe6cda0752017-10-06 15:04:39 +02003216 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3217 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
LEROY Christophe24b92ff2017-10-06 15:04:49 +02003218 devm_kfree(dev, t_alg);
LEROY Christophe6cda0752017-10-06 15:04:39 +02003219 return ERR_PTR(-ENOTSUPP);
3220 }
Lee Nipperacbf7c622010-05-19 19:19:33 +10003221 break;
3222 case CRYPTO_ALG_TYPE_AHASH:
3223 alg = &t_alg->algt.alg.hash.halg.base;
Lee Nipper497f2e62010-05-19 19:20:36 +10003224 alg->cra_init = talitos_cra_init_ahash;
LEROY Christophead4cd512018-02-26 17:40:04 +01003225 alg->cra_exit = talitos_cra_exit;
Kim Phillipsb286e002012-08-08 20:33:34 -05003226 t_alg->algt.alg.hash.init = ahash_init;
3227 t_alg->algt.alg.hash.update = ahash_update;
3228 t_alg->algt.alg.hash.final = ahash_final;
3229 t_alg->algt.alg.hash.finup = ahash_finup;
3230 t_alg->algt.alg.hash.digest = ahash_digest;
LEROY Christophe56136632017-09-12 11:03:39 +02003231 if (!strncmp(alg->cra_name, "hmac", 4))
3232 t_alg->algt.alg.hash.setkey = ahash_setkey;
Horia Geantă3639ca82016-04-21 19:24:55 +03003233 t_alg->algt.alg.hash.import = ahash_import;
3234 t_alg->algt.alg.hash.export = ahash_export;
Kim Phillipsb286e002012-08-08 20:33:34 -05003235
Lee Nipper79b3a412011-11-21 16:13:25 +08003236 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
Kim Phillips0b2730d2011-12-12 14:59:10 -06003237 !strncmp(alg->cra_name, "hmac", 4)) {
LEROY Christophe24b92ff2017-10-06 15:04:49 +02003238 devm_kfree(dev, t_alg);
Lee Nipper79b3a412011-11-21 16:13:25 +08003239 return ERR_PTR(-ENOTSUPP);
Kim Phillips0b2730d2011-12-12 14:59:10 -06003240 }
Kim Phillips60f208d2010-05-19 19:21:53 +10003241 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
Lee Nipper79b3a412011-11-21 16:13:25 +08003242 (!strcmp(alg->cra_name, "sha224") ||
3243 !strcmp(alg->cra_name, "hmac(sha224)"))) {
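			/*
			 * No SHA-224 hardware init: use a SHA-256 descriptor with the
			 * initial state supplied by software (ahash_init_sha224_swinit).
			 */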
Kim Phillips60f208d2010-05-19 19:21:53 +10003244 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3245 t_alg->algt.desc_hdr_template =
3246 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3247 DESC_HDR_SEL0_MDEUA |
3248 DESC_HDR_MODE0_MDEU_SHA256;
3249 }
Lee Nipper497f2e62010-05-19 19:20:36 +10003250 break;
Kim Phillips1d119112010-09-23 15:55:27 +08003251 default:
3252 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
LEROY Christophe24b92ff2017-10-06 15:04:49 +02003253 devm_kfree(dev, t_alg);
Kim Phillips1d119112010-09-23 15:55:27 +08003254 return ERR_PTR(-EINVAL);
Lee Nipperacbf7c622010-05-19 19:19:33 +10003255 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08003256
Kim Phillips9c4a7962008-06-23 19:50:15 +08003257 alg->cra_module = THIS_MODULE;
LEROY Christopheb0057762016-06-06 13:20:44 +02003258 if (t_alg->algt.priority)
3259 alg->cra_priority = t_alg->algt.priority;
3260 else
3261 alg->cra_priority = TALITOS_CRA_PRIORITY;
Christophe Leroyc9cca702019-05-21 13:34:18 +00003262 if (has_ftr_sec1(priv))
3263 alg->cra_alignmask = 3;
3264 else
3265 alg->cra_alignmask = 0;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003266 alg->cra_ctxsize = sizeof(struct talitos_ctx);
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01003267 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003268
Kim Phillips9c4a7962008-06-23 19:50:15 +08003269 t_alg->dev = dev;
3270
3271 return t_alg;
3272}
3273
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003274static int talitos_probe_irq(struct platform_device *ofdev)
3275{
3276 struct device *dev = &ofdev->dev;
3277 struct device_node *np = ofdev->dev.of_node;
3278 struct talitos_private *priv = dev_get_drvdata(dev);
3279 int err;
LEROY Christophedd3c0982015-04-17 16:32:13 +02003280 bool is_sec1 = has_ftr_sec1(priv);
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003281
3282 priv->irq[0] = irq_of_parse_and_map(np, 0);
Kim Phillips2cdba3c2011-12-12 14:59:11 -06003283 if (!priv->irq[0]) {
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003284 dev_err(dev, "failed to map irq\n");
3285 return -EINVAL;
3286 }
LEROY Christophedd3c0982015-04-17 16:32:13 +02003287 if (is_sec1) {
3288 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3289 dev_driver_string(dev), dev);
3290 goto primary_out;
3291 }
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003292
3293 priv->irq[1] = irq_of_parse_and_map(np, 1);
3294
3295 /* get the primary irq line */
Kim Phillips2cdba3c2011-12-12 14:59:11 -06003296 if (!priv->irq[1]) {
LEROY Christophedd3c0982015-04-17 16:32:13 +02003297 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003298 dev_driver_string(dev), dev);
3299 goto primary_out;
3300 }
3301
LEROY Christophedd3c0982015-04-17 16:32:13 +02003302 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003303 dev_driver_string(dev), dev);
3304 if (err)
3305 goto primary_out;
3306
3307 /* get the secondary irq line */
LEROY Christophedd3c0982015-04-17 16:32:13 +02003308 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003309 dev_driver_string(dev), dev);
3310 if (err) {
3311 dev_err(dev, "failed to request secondary irq\n");
3312 irq_dispose_mapping(priv->irq[1]);
Kim Phillips2cdba3c2011-12-12 14:59:11 -06003313 priv->irq[1] = 0;
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003314 }
3315
3316 return err;
3317
3318primary_out:
3319 if (err) {
3320 dev_err(dev, "failed to request primary irq\n");
3321 irq_dispose_mapping(priv->irq[0]);
Kim Phillips2cdba3c2011-12-12 14:59:11 -06003322 priv->irq[0] = 0;
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003323 }
3324
3325 return err;
3326}
3327
Grant Likely1c48a5c2011-02-17 02:43:24 -07003328static int talitos_probe(struct platform_device *ofdev)
Kim Phillips9c4a7962008-06-23 19:50:15 +08003329{
3330 struct device *dev = &ofdev->dev;
Grant Likely61c7a082010-04-13 16:12:29 -07003331 struct device_node *np = ofdev->dev.of_node;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003332 struct talitos_private *priv;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003333 int i, err;
LEROY Christophe5fa7fa12015-04-17 16:32:11 +02003334 int stride;
LEROY Christophefd5ea7f2017-10-06 15:04:53 +02003335 struct resource *res;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003336
LEROY Christophe24b92ff2017-10-06 15:04:49 +02003337 priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003338 if (!priv)
3339 return -ENOMEM;
3340
Kevin Haof3de9cb2014-01-28 20:17:23 +08003341 INIT_LIST_HEAD(&priv->alg_list);
3342
Kim Phillips9c4a7962008-06-23 19:50:15 +08003343 dev_set_drvdata(dev, priv);
3344
3345 priv->ofdev = ofdev;
3346
Horia Geanta511d63c2012-03-30 17:49:53 +03003347 spin_lock_init(&priv->reg_lock);
3348
LEROY Christophefd5ea7f2017-10-06 15:04:53 +02003349 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3350 if (!res)
3351 return -ENXIO;
3352 priv->reg = devm_ioremap(dev, res->start, resource_size(res));
Kim Phillips9c4a7962008-06-23 19:50:15 +08003353 if (!priv->reg) {
 3354 dev_err(dev, "failed to ioremap\n");
3355 err = -ENOMEM;
3356 goto err_out;
3357 }
3358
3359 /* get SEC version capabilities from device tree */
LEROY Christophefa14c6c2017-10-06 15:04:51 +02003360 of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3361 of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3362 of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3363 of_property_read_u32(np, "fsl,descriptor-types-mask",
3364 &priv->desc_types);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003365
3366 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3367 !priv->exec_units || !priv->desc_types) {
3368 dev_err(dev, "invalid property data in device tree node\n");
3369 err = -EINVAL;
3370 goto err_out;
3371 }
3372
Lee Nipperf3c85bc2008-07-30 16:26:57 +08003373 if (of_device_is_compatible(np, "fsl,sec3.0"))
3374 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3375
Kim Phillipsfe5720e2008-10-12 20:33:14 +08003376 if (of_device_is_compatible(np, "fsl,sec2.1"))
Kim Phillips60f208d2010-05-19 19:21:53 +10003377 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
Lee Nipper79b3a412011-11-21 16:13:25 +08003378 TALITOS_FTR_SHA224_HWINIT |
3379 TALITOS_FTR_HMAC_OK;
Kim Phillipsfe5720e2008-10-12 20:33:14 +08003380
LEROY Christophe21590882015-04-17 16:32:05 +02003381 if (of_device_is_compatible(np, "fsl,sec1.0"))
3382 priv->features |= TALITOS_FTR_SEC1;
3383
LEROY Christophe5fa7fa12015-04-17 16:32:11 +02003384 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3385 priv->reg_deu = priv->reg + TALITOS12_DEU;
3386 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3387 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3388 stride = TALITOS1_CH_STRIDE;
3389 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3390 priv->reg_deu = priv->reg + TALITOS10_DEU;
3391 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3392 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3393 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3394 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3395 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3396 stride = TALITOS1_CH_STRIDE;
3397 } else {
3398 priv->reg_deu = priv->reg + TALITOS2_DEU;
3399 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3400 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3401 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3402 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3403 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3404 priv->reg_keu = priv->reg + TALITOS2_KEU;
3405 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3406 stride = TALITOS2_CH_STRIDE;
3407 }
3408
LEROY Christophedd3c0982015-04-17 16:32:13 +02003409 err = talitos_probe_irq(ofdev);
3410 if (err)
3411 goto err_out;
3412
Christophe Leroyc8c74642019-06-17 21:14:45 +00003413 if (has_ftr_sec1(priv)) {
LEROY Christophe9c02e282017-10-06 15:04:55 +02003414 if (priv->num_channels == 1)
3415 tasklet_init(&priv->done_task[0], talitos1_done_ch0,
LEROY Christophedd3c0982015-04-17 16:32:13 +02003416 (unsigned long)dev);
LEROY Christophe9c02e282017-10-06 15:04:55 +02003417 else
3418 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3419 (unsigned long)dev);
3420 } else {
3421 if (priv->irq[1]) {
LEROY Christophedd3c0982015-04-17 16:32:13 +02003422 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3423 (unsigned long)dev);
3424 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3425 (unsigned long)dev);
LEROY Christophe9c02e282017-10-06 15:04:55 +02003426 } else if (priv->num_channels == 1) {
3427 tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3428 (unsigned long)dev);
3429 } else {
3430 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3431 (unsigned long)dev);
LEROY Christophedd3c0982015-04-17 16:32:13 +02003432 }
3433 }
3434
Kees Cooka86854d2018-06-12 14:07:58 -07003435 priv->chan = devm_kcalloc(dev,
3436 priv->num_channels,
3437 sizeof(struct talitos_channel),
3438 GFP_KERNEL);
Kim Phillips4b9926282009-08-13 11:50:38 +10003439 if (!priv->chan) {
3440 dev_err(dev, "failed to allocate channel management space\n");
Kim Phillips9c4a7962008-06-23 19:50:15 +08003441 err = -ENOMEM;
3442 goto err_out;
3443 }
3444
Martin Hicksf641ddd2015-03-03 08:21:33 -05003445 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3446
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003447 for (i = 0; i < priv->num_channels; i++) {
LEROY Christophe5fa7fa12015-04-17 16:32:11 +02003448 priv->chan[i].reg = priv->reg + stride * (i + 1);
Kim Phillips2cdba3c2011-12-12 14:59:11 -06003449 if (!priv->irq[1] || !(i & 1))
Kim Phillipsc3e337f2011-11-21 16:13:27 +08003450 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
Kim Phillipsad42d5f2011-11-21 16:13:27 +08003451
Kim Phillips4b9926282009-08-13 11:50:38 +10003452 spin_lock_init(&priv->chan[i].head_lock);
3453 spin_lock_init(&priv->chan[i].tail_lock);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003454
Kees Cooka86854d2018-06-12 14:07:58 -07003455 priv->chan[i].fifo = devm_kcalloc(dev,
3456 priv->fifo_len,
3457 sizeof(struct talitos_request),
3458 GFP_KERNEL);
Kim Phillips4b9926282009-08-13 11:50:38 +10003459 if (!priv->chan[i].fifo) {
Kim Phillips9c4a7962008-06-23 19:50:15 +08003460 dev_err(dev, "failed to allocate request fifo %d\n", i);
3461 err = -ENOMEM;
3462 goto err_out;
3463 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08003464
Kim Phillips4b9926282009-08-13 11:50:38 +10003465 atomic_set(&priv->chan[i].submit_count,
3466 -(priv->chfifo_len - 1));
Martin Hicksf641ddd2015-03-03 08:21:33 -05003467 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08003468
Kim Phillips81eb0242009-08-13 11:51:51 +10003469 dma_set_mask(dev, DMA_BIT_MASK(36));
3470
Kim Phillips9c4a7962008-06-23 19:50:15 +08003471 /* reset and initialize the h/w */
3472 err = init_device(dev);
3473 if (err) {
3474 dev_err(dev, "failed to initialize device\n");
3475 goto err_out;
3476 }
3477
3478 /* register the RNG, if available */
3479 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3480 err = talitos_register_rng(dev);
3481 if (err) {
3482 dev_err(dev, "failed to register hwrng: %d\n", err);
3483 goto err_out;
3484 } else
3485 dev_info(dev, "hwrng\n");
3486 }
3487
3488 /* register crypto algorithms the device supports */
Kim Phillips9c4a7962008-06-23 19:50:15 +08003489 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3490 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3491 struct talitos_crypto_alg *t_alg;
Herbert Xuaeb4c132015-07-30 17:53:22 +08003492 struct crypto_alg *alg = NULL;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003493
3494 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3495 if (IS_ERR(t_alg)) {
3496 err = PTR_ERR(t_alg);
Kim Phillips0b2730d2011-12-12 14:59:10 -06003497 if (err == -ENOTSUPP)
Lee Nipper79b3a412011-11-21 16:13:25 +08003498 continue;
Kim Phillips9c4a7962008-06-23 19:50:15 +08003499 goto err_out;
3500 }
3501
Lee Nipperacbf7c622010-05-19 19:19:33 +10003502 switch (t_alg->algt.type) {
Ard Biesheuvel373960d2019-11-09 18:09:49 +01003503 case CRYPTO_ALG_TYPE_SKCIPHER:
3504 err = crypto_register_skcipher(
3505 &t_alg->algt.alg.skcipher);
3506 alg = &t_alg->algt.alg.skcipher.base;
Lee Nipperacbf7c622010-05-19 19:19:33 +10003507 break;
Herbert Xuaeb4c132015-07-30 17:53:22 +08003508
3509 case CRYPTO_ALG_TYPE_AEAD:
3510 err = crypto_register_aead(
3511 &t_alg->algt.alg.aead);
3512 alg = &t_alg->algt.alg.aead.base;
3513 break;
3514
Lee Nipperacbf7c622010-05-19 19:19:33 +10003515 case CRYPTO_ALG_TYPE_AHASH:
3516 err = crypto_register_ahash(
3517 &t_alg->algt.alg.hash);
Herbert Xuaeb4c132015-07-30 17:53:22 +08003518 alg = &t_alg->algt.alg.hash.halg.base;
Lee Nipperacbf7c622010-05-19 19:19:33 +10003519 break;
3520 }
Kim Phillips9c4a7962008-06-23 19:50:15 +08003521 if (err) {
3522 dev_err(dev, "%s alg registration failed\n",
Herbert Xuaeb4c132015-07-30 17:53:22 +08003523 alg->cra_driver_name);
LEROY Christophe24b92ff2017-10-06 15:04:49 +02003524 devm_kfree(dev, t_alg);
Horia Geanta991155b2013-03-20 16:31:38 +02003525 } else
Kim Phillips9c4a7962008-06-23 19:50:15 +08003526 list_add_tail(&t_alg->entry, &priv->alg_list);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003527 }
3528 }
Kim Phillips5b859b6e2011-11-21 16:13:26 +08003529 if (!list_empty(&priv->alg_list))
3530 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3531 (char *)of_get_property(np, "compatible", NULL));
Kim Phillips9c4a7962008-06-23 19:50:15 +08003532
3533 return 0;
3534
3535err_out:
3536 talitos_remove(ofdev);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003537
3538 return err;
3539}
3540
Márton Németh6c3f9752010-01-17 21:54:01 +11003541static const struct of_device_id talitos_match[] = {
LEROY Christophe0635b7db2015-04-17 16:32:20 +02003542#ifdef CONFIG_CRYPTO_DEV_TALITOS1
3543 {
3544 .compatible = "fsl,sec1.0",
3545 },
3546#endif
3547#ifdef CONFIG_CRYPTO_DEV_TALITOS2
Kim Phillips9c4a7962008-06-23 19:50:15 +08003548 {
3549 .compatible = "fsl,sec2.0",
3550 },
LEROY Christophe0635b7db2015-04-17 16:32:20 +02003551#endif
Kim Phillips9c4a7962008-06-23 19:50:15 +08003552 {},
3553};
3554MODULE_DEVICE_TABLE(of, talitos_match);
3555
Grant Likely1c48a5c2011-02-17 02:43:24 -07003556static struct platform_driver talitos_driver = {
Grant Likely40182942010-04-13 16:13:02 -07003557 .driver = {
3558 .name = "talitos",
Grant Likely40182942010-04-13 16:13:02 -07003559 .of_match_table = talitos_match,
3560 },
Kim Phillips9c4a7962008-06-23 19:50:15 +08003561 .probe = talitos_probe,
Al Viro596f1032008-11-22 17:34:24 +00003562 .remove = talitos_remove,
Kim Phillips9c4a7962008-06-23 19:50:15 +08003563};
3564
Axel Lin741e8c22011-11-26 21:26:19 +08003565module_platform_driver(talitos_driver);
Kim Phillips9c4a7962008-06-23 19:50:15 +08003566
3567MODULE_LICENSE("GPL");
3568MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3569MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");