// SPDX-License-Identifier: GPL-2.0
//
// Cryptographic API.
//
// Support for Samsung S5PV210 and Exynos HW acceleration.
//
// Copyright (C) 2011 NetUP Inc. All rights reserved.
// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
//
// Hash part based on omap-sham.c driver.

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/ctr.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>

#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define _SBF(s, v)			((v) << (s))
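/* e.g. _SBF(4, 0x02) == 0x02 << 4: composes the multi-bit register fields used below */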

/* Feed control registers */
#define SSS_REG_FCINTSTAT		0x0000
#define SSS_FCINTSTAT_HPARTINT		BIT(7)
#define SSS_FCINTSTAT_HDONEINT		BIT(5)
#define SSS_FCINTSTAT_BRDMAINT		BIT(3)
#define SSS_FCINTSTAT_BTDMAINT		BIT(2)
#define SSS_FCINTSTAT_HRDMAINT		BIT(1)
#define SSS_FCINTSTAT_PKDMAINT		BIT(0)

#define SSS_REG_FCINTENSET		0x0004
#define SSS_FCINTENSET_HPARTINTENSET	BIT(7)
#define SSS_FCINTENSET_HDONEINTENSET	BIT(5)
#define SSS_FCINTENSET_BRDMAINTENSET	BIT(3)
#define SSS_FCINTENSET_BTDMAINTENSET	BIT(2)
#define SSS_FCINTENSET_HRDMAINTENSET	BIT(1)
#define SSS_FCINTENSET_PKDMAINTENSET	BIT(0)

#define SSS_REG_FCINTENCLR		0x0008
#define SSS_FCINTENCLR_HPARTINTENCLR	BIT(7)
#define SSS_FCINTENCLR_HDONEINTENCLR	BIT(5)
#define SSS_FCINTENCLR_BRDMAINTENCLR	BIT(3)
#define SSS_FCINTENCLR_BTDMAINTENCLR	BIT(2)
#define SSS_FCINTENCLR_HRDMAINTENCLR	BIT(1)
#define SSS_FCINTENCLR_PKDMAINTENCLR	BIT(0)

#define SSS_REG_FCINTPEND		0x000C
#define SSS_FCINTPEND_HPARTINTP		BIT(7)
#define SSS_FCINTPEND_HDONEINTP		BIT(5)
#define SSS_FCINTPEND_BRDMAINTP		BIT(3)
#define SSS_FCINTPEND_BTDMAINTP		BIT(2)
#define SSS_FCINTPEND_HRDMAINTP		BIT(1)
#define SSS_FCINTPEND_PKDMAINTP		BIT(0)

#define SSS_REG_FCFIFOSTAT		0x0010
#define SSS_FCFIFOSTAT_BRFIFOFUL	BIT(7)
#define SSS_FCFIFOSTAT_BRFIFOEMP	BIT(6)
#define SSS_FCFIFOSTAT_BTFIFOFUL	BIT(5)
#define SSS_FCFIFOSTAT_BTFIFOEMP	BIT(4)
#define SSS_FCFIFOSTAT_HRFIFOFUL	BIT(3)
#define SSS_FCFIFOSTAT_HRFIFOEMP	BIT(2)
#define SSS_FCFIFOSTAT_PKFIFOFUL	BIT(1)
#define SSS_FCFIFOSTAT_PKFIFOEMP	BIT(0)

#define SSS_REG_FCFIFOCTRL		0x0014
#define SSS_FCFIFOCTRL_DESSEL		BIT(2)
#define SSS_HASHIN_INDEPENDENT		_SBF(0, 0x00)
#define SSS_HASHIN_CIPHER_INPUT		_SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT	_SBF(0, 0x02)
#define SSS_HASHIN_MASK			_SBF(0, 0x03)

#define SSS_REG_FCBRDMAS		0x0020
#define SSS_REG_FCBRDMAL		0x0024
#define SSS_REG_FCBRDMAC		0x0028
#define SSS_FCBRDMAC_BYTESWAP		BIT(1)
#define SSS_FCBRDMAC_FLUSH		BIT(0)

#define SSS_REG_FCBTDMAS		0x0030
#define SSS_REG_FCBTDMAL		0x0034
#define SSS_REG_FCBTDMAC		0x0038
#define SSS_FCBTDMAC_BYTESWAP		BIT(1)
#define SSS_FCBTDMAC_FLUSH		BIT(0)

#define SSS_REG_FCHRDMAS		0x0040
#define SSS_REG_FCHRDMAL		0x0044
#define SSS_REG_FCHRDMAC		0x0048
#define SSS_FCHRDMAC_BYTESWAP		BIT(1)
#define SSS_FCHRDMAC_FLUSH		BIT(0)

#define SSS_REG_FCPKDMAS		0x0050
#define SSS_REG_FCPKDMAL		0x0054
#define SSS_REG_FCPKDMAC		0x0058
#define SSS_FCPKDMAC_BYTESWAP		BIT(3)
#define SSS_FCPKDMAC_DESCEND		BIT(2)
#define SSS_FCPKDMAC_TRANSMIT		BIT(1)
#define SSS_FCPKDMAC_FLUSH		BIT(0)

#define SSS_REG_FCPKDMAO		0x005C

/* AES registers */
#define SSS_REG_AES_CONTROL		0x00
#define SSS_AES_BYTESWAP_DI		BIT(11)
#define SSS_AES_BYTESWAP_DO		BIT(10)
#define SSS_AES_BYTESWAP_IV		BIT(9)
#define SSS_AES_BYTESWAP_CNT		BIT(8)
#define SSS_AES_BYTESWAP_KEY		BIT(7)
#define SSS_AES_KEY_CHANGE_MODE		BIT(6)
#define SSS_AES_KEY_SIZE_128		_SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192		_SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256		_SBF(4, 0x02)
#define SSS_AES_FIFO_MODE		BIT(3)
#define SSS_AES_CHAIN_MODE_ECB		_SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC		_SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR		_SBF(1, 0x02)
#define SSS_AES_MODE_DECRYPT		BIT(0)

#define SSS_REG_AES_STATUS		0x04
#define SSS_AES_BUSY			BIT(2)
#define SSS_AES_INPUT_READY		BIT(1)
#define SSS_AES_OUTPUT_READY		BIT(0)

#define SSS_REG_AES_IN_DATA(s)		(0x10 + (s << 2))
#define SSS_REG_AES_OUT_DATA(s)		(0x20 + (s << 2))
#define SSS_REG_AES_IV_DATA(s)		(0x30 + (s << 2))
#define SSS_REG_AES_CNT_DATA(s)		(0x40 + (s << 2))
#define SSS_REG_AES_KEY_DATA(s)		(0x80 + (s << 2))

#define SSS_REG(dev, reg)		((dev)->ioaddr + (SSS_REG_##reg))
#define SSS_READ(dev, reg)		__raw_readl(SSS_REG(dev, reg))
#define SSS_WRITE(dev, reg, val)	__raw_writel((val), SSS_REG(dev, reg))

#define SSS_AES_REG(dev, reg)		((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val)	__raw_writel((val), \
					SSS_AES_REG(dev, reg))
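/*
 * Token-pasting accessors: SSS_WRITE(dev, FCINTPEND, x) expands to a write of
 * SSS_REG_FCINTPEND in the feed-control bank, while SSS_AES_WRITE() targets
 * the per-variant AES register bank at dev->aes_ioaddr.
 */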

/* HW engine modes */
#define FLAGS_AES_DECRYPT		BIT(0)
#define FLAGS_AES_MODE_MASK		_SBF(1, 0x03)
#define FLAGS_AES_CBC			_SBF(1, 0x01)
#define FLAGS_AES_CTR			_SBF(1, 0x02)
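/* these software flags match the SSS_REG_AES_CONTROL bit layout above, so a request mode maps 1:1 onto control-register bits */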

#define AES_KEY_LEN			16
#define CRYPTO_QUEUE_LEN		1

/* HASH registers */
#define SSS_REG_HASH_CTRL		0x00

#define SSS_HASH_USER_IV_EN		BIT(5)
#define SSS_HASH_INIT_BIT		BIT(4)
#define SSS_HASH_ENGINE_SHA1		_SBF(1, 0x00)
#define SSS_HASH_ENGINE_MD5		_SBF(1, 0x01)
#define SSS_HASH_ENGINE_SHA256		_SBF(1, 0x02)

#define SSS_HASH_ENGINE_MASK		_SBF(1, 0x03)

#define SSS_REG_HASH_CTRL_PAUSE		0x04

#define SSS_HASH_PAUSE			BIT(0)

#define SSS_REG_HASH_CTRL_FIFO		0x08

#define SSS_HASH_FIFO_MODE_DMA		BIT(0)
#define SSS_HASH_FIFO_MODE_CPU		0

#define SSS_REG_HASH_CTRL_SWAP		0x0C

#define SSS_HASH_BYTESWAP_DI		BIT(3)
#define SSS_HASH_BYTESWAP_DO		BIT(2)
#define SSS_HASH_BYTESWAP_IV		BIT(1)
#define SSS_HASH_BYTESWAP_KEY		BIT(0)

#define SSS_REG_HASH_STATUS		0x10

#define SSS_HASH_STATUS_MSG_DONE	BIT(6)
#define SSS_HASH_STATUS_PARTIAL_DONE	BIT(4)
#define SSS_HASH_STATUS_BUFFER_READY	BIT(0)

#define SSS_REG_HASH_MSG_SIZE_LOW	0x20
#define SSS_REG_HASH_MSG_SIZE_HIGH	0x24

#define SSS_REG_HASH_PRE_MSG_SIZE_LOW	0x28
#define SSS_REG_HASH_PRE_MSG_SIZE_HIGH	0x2C

#define SSS_REG_HASH_IV(s)		(0xB0 + ((s) << 2))
#define SSS_REG_HASH_OUT(s)		(0x100 + ((s) << 2))

#define HASH_BLOCK_SIZE			64
#define HASH_REG_SIZEOF			4
#define HASH_MD5_MAX_REG		(MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
#define HASH_SHA1_MAX_REG		(SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
#define HASH_SHA256_MAX_REG		(SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)

/*
 * HASH bit numbers, used by device, set in dev->hash_flags with set_bit()
 * and clear_bit(), or tested with test_bit() or BIT(), to keep the HASH
 * state BUSY or FREE, or to signal state from irq_handler to hash_tasklet.
 * The SGS bits keep track of memory allocated for the scatterlist.
 */
#define HASH_FLAGS_BUSY		0
#define HASH_FLAGS_FINAL	1
#define HASH_FLAGS_DMA_ACTIVE	2
#define HASH_FLAGS_OUTPUT_READY	3
#define HASH_FLAGS_DMA_READY	4
#define HASH_FLAGS_SGS_COPIED	5
#define HASH_FLAGS_SGS_ALLOCED	6

/* HASH HW constants */
#define BUFLEN			HASH_BLOCK_SIZE

#define SSS_HASH_DMA_LEN_ALIGN	8
#define SSS_HASH_DMA_ALIGN_MASK	(SSS_HASH_DMA_LEN_ALIGN - 1)

#define SSS_HASH_QUEUE_LENGTH	10
230
Naveen Krishna Chatradhi89245102014-05-08 21:58:14 +0800231/**
232 * struct samsung_aes_variant - platform specific SSS driver data
Naveen Krishna Chatradhi89245102014-05-08 21:58:14 +0800233 * @aes_offset: AES register offset from SSS module's base.
Kamil Koniecznyc2afad62017-10-25 17:27:35 +0200234 * @hash_offset: HASH register offset from SSS module's base.
Kamil Konieczny0918f182019-02-22 13:21:44 +0100235 * @clk_names: names of clocks needed to run SSS IP
Naveen Krishna Chatradhi89245102014-05-08 21:58:14 +0800236 *
237 * Specifies platform specific configuration of SSS module.
238 * Note: A structure for driver specific platform data is used for future
239 * expansion of its usage.
240 */
241struct samsung_aes_variant {
Krzysztof Kozlowski5318c532016-05-27 13:49:40 +0200242 unsigned int aes_offset;
Kamil Koniecznyc2afad62017-10-25 17:27:35 +0200243 unsigned int hash_offset;
Kamil Koniecznyaa1abbe2019-03-01 15:02:54 +0100244 const char *clk_names[2];
Naveen Krishna Chatradhi89245102014-05-08 21:58:14 +0800245};

struct s5p_aes_reqctx {
	unsigned long			mode;
};

struct s5p_aes_ctx {
	struct s5p_aes_dev		*dev;

	u8				aes_key[AES_MAX_KEY_SIZE];
	u8				nonce[CTR_RFC3686_NONCE_SIZE];
	int				keylen;
};

/**
 * struct s5p_aes_dev - Crypto device state container
 * @dev: Associated device
 * @clk: Clock for accessing hardware
 * @pclk: APB bus clock, if needed
 * @ioaddr: Mapped IO memory region
 * @aes_ioaddr: Per-variant offset for AES block IO memory
 * @irq_fc: Feed control interrupt line
 * @req: Crypto request currently handled by the device
 * @ctx: Configuration for currently handled crypto request
 * @sg_src: Scatter list with source data for currently handled block
 *	    in device. This is DMA-mapped into device.
 * @sg_dst: Scatter list with destination data for currently handled block
 *	    in device. This is DMA-mapped into device.
 * @sg_src_cpy: In case of unaligned access, copied scatter list
 *	    with source data.
 * @sg_dst_cpy: In case of unaligned access, copied scatter list
 *	    with destination data.
 * @tasklet: New request scheduling job
 * @queue: Crypto queue
 * @busy: Indicates whether the device is currently handling some request,
 *	  thus it uses some of the fields from this state, like:
 *	  req, ctx, sg_src/dst (and copies). This essentially
 *	  protects against concurrent access to these fields.
 * @lock: Lock for protecting both access to device hardware registers
 *	  and fields related to current request (including the busy field).
 * @res: Resources for hash.
 * @io_hash_base: Per-variant offset for HASH block IO memory.
 * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags
 *	       variable.
 * @hash_flags: Flags for current HASH op.
 * @hash_queue: Async hash queue.
 * @hash_tasklet: New HASH request scheduling job.
 * @xmit_buf: Buffer for current HASH request transfer into SSS block.
 * @hash_req: Current request sending to SSS HASH block.
 * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
 * @hash_sg_cnt: Counter for hash_sg_iter.
 * @use_hash: true if HASH algs enabled
 */
struct s5p_aes_dev {
	struct device			*dev;
	struct clk			*clk;
	struct clk			*pclk;
	void __iomem			*ioaddr;
	void __iomem			*aes_ioaddr;
	int				irq_fc;

	struct skcipher_request		*req;
	struct s5p_aes_ctx		*ctx;
	struct scatterlist		*sg_src;
	struct scatterlist		*sg_dst;

	struct scatterlist		*sg_src_cpy;
	struct scatterlist		*sg_dst_cpy;

	struct tasklet_struct		tasklet;
	struct crypto_queue		queue;
	bool				busy;
	spinlock_t			lock;

	struct resource			*res;
	void __iomem			*io_hash_base;

	spinlock_t			hash_lock; /* protect hash_ vars */
	unsigned long			hash_flags;
	struct crypto_queue		hash_queue;
	struct tasklet_struct		hash_tasklet;

	u8				xmit_buf[BUFLEN];
	struct ahash_request		*hash_req;
	struct scatterlist		*hash_sg_iter;
	unsigned int			hash_sg_cnt;

	bool				use_hash;
};

/**
 * struct s5p_hash_reqctx - HASH request context
 * @dd: Associated device
 * @op_update: Current request operation (OP_UPDATE or OP_FINAL)
 * @digcnt: Number of bytes processed by HW (without buffer[] ones)
 * @digest: Digest message or IV for partial result
 * @nregs: Number of HW registers for digest or IV read/write
 * @engine: Bits for selecting type of HASH in SSS block
 * @sg: sg for DMA transfer
 * @sg_len: Length of sg for DMA transfer
 * @sgl[]: sg for joining buffer and req->src scatterlist
 * @skip: Skip offset in req->src for current op
 * @total: Total number of bytes for current request
 * @finup: Keep state for finup or final.
 * @error: Keep track of error.
 * @bufcnt: Number of bytes held in buffer[]
 * @buffer[]: For byte(s) from end of req->src in UPDATE op
 */
struct s5p_hash_reqctx {
	struct s5p_aes_dev	*dd;
	bool			op_update;

	u64			digcnt;
	u8			digest[SHA256_DIGEST_SIZE];

	unsigned int		nregs; /* digest_size / sizeof(reg) */
	u32			engine;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	struct scatterlist	sgl[2];
	unsigned int		skip;
	unsigned int		total;
	bool			finup;
	bool			error;

	u32			bufcnt;
	u8			buffer[0];
};
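
/*
 * Note: buffer[] is a zero-length trailing array; the ahash request context
 * is presumably allocated with extra room for it (BUFLEN bytes) via
 * crypto_ahash_set_reqsize() elsewhere in this driver.
 */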

/**
 * struct s5p_hash_ctx - HASH transformation context
 * @dd: Associated device
 * @flags: Bits for algorithm HASH.
 * @fallback: Software transformation for zero message or size < BUFLEN.
 */
struct s5p_hash_ctx {
	struct s5p_aes_dev	*dd;
	unsigned long		flags;
	struct crypto_shash	*fallback;
};

static const struct samsung_aes_variant s5p_aes_data = {
	.aes_offset	= 0x4000,
	.hash_offset	= 0x6000,
	.clk_names	= { "secss", },
};

static const struct samsung_aes_variant exynos_aes_data = {
	.aes_offset	= 0x200,
	.hash_offset	= 0x400,
	.clk_names	= { "secss", },
};

static const struct samsung_aes_variant exynos5433_slim_aes_data = {
	.aes_offset	= 0x400,
	.hash_offset	= 0x800,
	.clk_names	= { "pclk", "aclk", },
};

static const struct of_device_id s5p_sss_dt_match[] = {
	{
		.compatible = "samsung,s5pv210-secss",
		.data = &s5p_aes_data,
	},
	{
		.compatible = "samsung,exynos4210-secss",
		.data = &exynos_aes_data,
	},
	{
		.compatible = "samsung,exynos5433-slim-sss",
		.data = &exynos5433_slim_aes_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);

static inline const struct samsung_aes_variant *find_s5p_sss_version
				   (const struct platform_device *pdev)
{
	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
		const struct of_device_id *match;

		match = of_match_node(s5p_sss_dt_match,
					pdev->dev.of_node);
		return (const struct samsung_aes_variant *)match->data;
	}
	return (const struct samsung_aes_variant *)
			platform_get_device_id(pdev)->driver_data;
}

static struct s5p_aes_dev *s5p_dev;

static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
			       const struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}

static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
				const struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}
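
/*
 * Writing the FCBRDMAL/FCBTDMAL length registers above is what actually
 * kicks off the DMA transfer; see the note in s5p_aes_interrupt().
 */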

static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
{
	int len;

	if (!*sg)
		return;

	len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
	free_pages((unsigned long)sg_virt(*sg), get_order(len));

	kfree(*sg);
	*sg = NULL;
}

static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
			    unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static void s5p_sg_done(struct s5p_aes_dev *dev)
{
	struct skcipher_request *req = dev->req;
	struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);

	if (dev->sg_dst_cpy) {
		dev_dbg(dev->dev,
			"Copying %d bytes of output data back to original place\n",
			dev->req->cryptlen);
		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
				dev->req->cryptlen, 1);
	}
	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
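	/* hand the updated IV (CBC) or counter (CTR) back to the request for chaining */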
	if (reqctx->mode & FLAGS_AES_CBC)
		memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
	else if (reqctx->mode & FLAGS_AES_CTR)
		memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
}

/* Calls the completion. Cannot be called with dev->lock held. */
static void s5p_aes_complete(struct skcipher_request *req, int err)
{
	req->base.complete(&req->base, err);
}

static void s5p_unset_outdata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
}

static void s5p_unset_indata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}
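
/*
 * s5p_make_sg_cpy() - copy a misaligned request into a freshly allocated,
 * AES_BLOCK_SIZE-aligned contiguous buffer wrapped in a single-entry
 * scatterlist (used for the unaligned src/dst cases noted above).
 */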
static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
			   struct scatterlist **dst)
{
	void *pages;
	int len;

	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
	if (!*dst)
		return -ENOMEM;

	len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
	if (!pages) {
		kfree(*dst);
		*dst = NULL;
		return -ENOMEM;
	}

	s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0);

	sg_init_table(*dst, 1);
	sg_set_buf(*dst, pages, len);

	return 0;
}

static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	if (!sg->length)
		return -EINVAL;

	if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
		return -ENOMEM;

	dev->sg_dst = sg;

	return 0;
}

static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	if (!sg->length)
		return -EINVAL;

	if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
		return -ENOMEM;

	dev->sg_src = sg;

	return 0;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new transmitting (output) data is ready and its address+length
 *    have to be written to device (by calling s5p_set_dma_outdata()).
 */
static int s5p_aes_tx(struct s5p_aes_dev *dev)
{
	int ret = 0;

	s5p_unset_outdata(dev);

	if (!sg_is_last(dev->sg_dst)) {
		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
		if (!ret)
			ret = 1;
	}

	return ret;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new receiving (input) data is ready and its address+length
 *    have to be written to device (by calling s5p_set_dma_indata()).
 */
static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
{
	int ret = 0;

	s5p_unset_indata(dev);

	if (!sg_is_last(dev->sg_src)) {
		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
		if (!ret)
			ret = 1;
	}

	return ret;
}

static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_hash_base + offset);
}

static inline void s5p_hash_write(struct s5p_aes_dev *dd,
				  u32 offset, u32 value)
{
	__raw_writel(value, dd->io_hash_base + offset);
}

/**
 * s5p_set_dma_hashdata() - start DMA with sg
 * @dev: device
 * @sg: scatterlist ready to DMA transmit
 */
static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
				 const struct scatterlist *sg)
{
	dev->hash_sg_cnt--;
	SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
}

/**
 * s5p_hash_rx() - get next hash_sg_iter
 * @dev: device
 *
 * Return:
 * 2	if there is no more data and it is UPDATE op
 * 1	if new receiving (input) data is ready and can be written to device
 * 0	if there is no more data and it is FINAL op
 */
static int s5p_hash_rx(struct s5p_aes_dev *dev)
{
	if (dev->hash_sg_cnt > 0) {
		dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
		return 1;
	}

	set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
	if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
		return 0;

	return 2;
}

static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
	struct skcipher_request *req;
	int err_dma_tx = 0;
	int err_dma_rx = 0;
	int err_dma_hx = 0;
	bool tx_end = false;
	bool hx_end = false;
	unsigned long flags;
	u32 status, st_bits;
	int err;

	spin_lock_irqsave(&dev->lock, flags);

	/*
	 * Handle rx or tx interrupt. If there is still data (scatterlist did not
	 * reach end), then map next scatterlist entry.
	 * In case of such mapping error, s5p_aes_complete() should be called.
	 *
	 * If there is no more data in tx scatter list, call s5p_aes_complete()
	 * and schedule new tasklet.
	 *
	 * Handle hx interrupt. If there is still data map next entry.
	 */
	status = SSS_READ(dev, FCINTSTAT);
	if (status & SSS_FCINTSTAT_BRDMAINT)
		err_dma_rx = s5p_aes_rx(dev);

	if (status & SSS_FCINTSTAT_BTDMAINT) {
		if (sg_is_last(dev->sg_dst))
			tx_end = true;
		err_dma_tx = s5p_aes_tx(dev);
	}

	if (status & SSS_FCINTSTAT_HRDMAINT)
		err_dma_hx = s5p_hash_rx(dev);

	st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
				SSS_FCINTSTAT_HRDMAINT);
	/* clear DMA bits */
	SSS_WRITE(dev, FCINTPEND, st_bits);

	/* clear HASH irq bits */
	if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
		/* cannot have both HPART and HDONE */
		if (status & SSS_FCINTSTAT_HPARTINT)
			st_bits = SSS_HASH_STATUS_PARTIAL_DONE;

		if (status & SSS_FCINTSTAT_HDONEINT)
			st_bits = SSS_HASH_STATUS_MSG_DONE;

		set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
		s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
		hx_end = true;
		/* when DONE or PART, do not handle HASH DMA */
		err_dma_hx = 0;
	}

	if (err_dma_rx < 0) {
		err = err_dma_rx;
		goto error;
	}
	if (err_dma_tx < 0) {
		err = err_dma_tx;
		goto error;
	}

	if (tx_end) {
		s5p_sg_done(dev);
		if (err_dma_hx == 1)
			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

		spin_unlock_irqrestore(&dev->lock, flags);

		s5p_aes_complete(dev->req, 0);
		/* Device is still busy */
		tasklet_schedule(&dev->tasklet);
	} else {
		/*
		 * Writing length of DMA block (either receiving or
		 * transmitting) will start the operation immediately, so this
		 * should be done at the end (even after clearing pending
		 * interrupts to not miss the interrupt).
		 */
		if (err_dma_tx == 1)
			s5p_set_dma_outdata(dev, dev->sg_dst);
		if (err_dma_rx == 1)
			s5p_set_dma_indata(dev, dev->sg_src);
		if (err_dma_hx == 1)
			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

		spin_unlock_irqrestore(&dev->lock, flags);
	}

	goto hash_irq_end;

error:
	s5p_sg_done(dev);
	dev->busy = false;
	req = dev->req;
	if (err_dma_hx == 1)
		s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(req, err);

hash_irq_end:
	/*
	 * Note about else if:
	 * when hash_sg_iter reaches end and it's UPDATE op,
	 * issue SSS_HASH_PAUSE and wait for HPART irq
	 */
	if (hx_end)
		tasklet_schedule(&dev->hash_tasklet);
	else if (err_dma_hx == 2)
		s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
			       SSS_HASH_PAUSE);

	return IRQ_HANDLED;
}

/**
 * s5p_hash_read_msg() - read message or IV from HW
 * @req: AHASH request
 */
static void s5p_hash_read_msg(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i;

	for (i = 0; i < ctx->nregs; i++)
		hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
}

/**
 * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
 * @dd: device
 * @ctx: request context
 */
static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
				  const struct s5p_hash_reqctx *ctx)
{
	const u32 *hash = (const u32 *)ctx->digest;
	unsigned int i;

	for (i = 0; i < ctx->nregs; i++)
		s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
}

/**
 * s5p_hash_write_iv() - write IV for next partial/finup op.
 * @req: AHASH request
 */
static void s5p_hash_write_iv(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	s5p_hash_write_ctx_iv(ctx->dd, ctx);
}

/**
 * s5p_hash_copy_result() - copy digest into req->result
 * @req: AHASH request
 */
static void s5p_hash_copy_result(struct ahash_request *req)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
}

/**
 * s5p_hash_dma_flush() - flush HASH DMA
 * @dev: secss device
 */
static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
{
	SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
}

/**
 * s5p_hash_dma_enable() - enable DMA mode for HASH
 * @dev: secss device
 *
 * enable DMA mode for HASH
 */
static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
{
	s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
}

/**
 * s5p_hash_irq_disable() - disable irq HASH signals
 * @dev: secss device
 * @flags: bitfield with irq's to be disabled
 */
static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
{
	SSS_WRITE(dev, FCINTENCLR, flags);
}

/**
 * s5p_hash_irq_enable() - enable irq signals
 * @dev: secss device
 * @flags: bitfield with irq's to be enabled
 */
static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
{
	SSS_WRITE(dev, FCINTENSET, flags);
}

/**
 * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
 * @dev: secss device
 * @hashflow: HASH stream flow with/without crypto AES/DES
 */
static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
{
	unsigned long flags;
	u32 flow;

	spin_lock_irqsave(&dev->lock, flags);

	flow = SSS_READ(dev, FCFIFOCTRL);
	flow &= ~SSS_HASHIN_MASK;
	flow |= hashflow;
	SSS_WRITE(dev, FCFIFOCTRL, flow);

	spin_unlock_irqrestore(&dev->lock, flags);
}

/**
 * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
 * @dev: secss device
 * @hashflow: HASH stream flow with/without AES/DES
 *
 * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW,
 * enable HASH irq's HRDMA, HDONE, HPART
 */
static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
{
	s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
			     SSS_FCINTENCLR_HDONEINTENCLR |
			     SSS_FCINTENCLR_HPARTINTENCLR);
	s5p_hash_dma_flush(dev);

	s5p_hash_dma_enable(dev);
	s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
	s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
			    SSS_FCINTENSET_HDONEINTENSET |
			    SSS_FCINTENSET_HPARTINTENSET);
}

/**
 * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
 * @dd: secss device
 * @length: length for request
 * @final: true if final op
 *
 * Prepare SSS HASH block for processing bytes in DMA mode. If it is called
 * after previous updates, fill up IV words. For final, calculate and set
 * lengths for HASH so SecSS can finalize hash. For partial, set the SSS
 * HASH length to 2^63 so it will never be reached, and set prelow and
 * prehigh to zero.
 *
 * This function does not start the DMA transfer.
 */
static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
				bool final)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
	u32 prelow, prehigh, low, high;
	u32 configflags, swapflags;
	u64 tmplen;

	configflags = ctx->engine | SSS_HASH_INIT_BIT;

	if (likely(ctx->digcnt)) {
		s5p_hash_write_ctx_iv(dd, ctx);
		configflags |= SSS_HASH_USER_IV_EN;
	}

	if (final) {
		/* number of bytes for last part */
		low = length;
		high = 0;
		/* total number of bits prev hashed */
		tmplen = ctx->digcnt * 8;
		prelow = (u32)tmplen;
		prehigh = (u32)(tmplen >> 32);
	} else {
		prelow = 0;
		prehigh = 0;
		low = 0;
		high = BIT(31);
	}

	swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
		    SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;

	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);

	s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
	s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
}

/**
 * s5p_hash_xmit_dma() - start DMA hash processing
 * @dd: secss device
 * @length: length for request
 * @final: true if final op
 *
 * Update digcnt here, as it is needed for finup/final op.
 */
static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
			     bool final)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
	unsigned int cnt;

	cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
	if (!cnt) {
		dev_err(dd->dev, "dma_map_sg error\n");
		ctx->error = true;
		return -EINVAL;
	}

	set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
	dd->hash_sg_iter = ctx->sg;
	dd->hash_sg_cnt = cnt;
	s5p_hash_write_ctrl(dd, length, final);
	ctx->digcnt += length;
	ctx->total -= length;

	/* catch last interrupt */
	if (final)
		set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);

	s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */

	return -EINPROGRESS;
}

/**
 * s5p_hash_copy_sgs() - copy request's bytes into new buffer
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 *
 * Allocate new buffer, copy data for HASH into it. If there was xmit_buf
 * filled, copy it first, then copy data from sg into it. Prepare one sgl[0]
 * with allocated buffer.
 *
 * Set bit in dd->hash_flag so we can free it after irq ends processing.
 */
static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
			     struct scatterlist *sg, unsigned int new_len)
{
	unsigned int pages, len;
	void *buf;

	len = new_len + ctx->bufcnt;
	pages = get_order(len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
		ctx->error = true;
		return -ENOMEM;
	}

	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
				 new_len, 0);
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, len);
	ctx->sg = ctx->sgl;
	ctx->sg_len = 1;
	ctx->bufcnt = 0;
	ctx->skip = 0;
	set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);

	return 0;
}

/**
 * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 *
 * Allocate new scatterlist table, copy data for HASH into it. If there was
 * xmit_buf filled, prepare it first, then copy page, length and offset from
 * source sg into it, adjusting begin and/or end for skip offset and
 * hash_later value.
 *
 * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
 * it after irq ends processing.
 */
static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
				  struct scatterlist *sg, unsigned int new_len)
{
	unsigned int skip = ctx->skip, n = sg_nents(sg);
	struct scatterlist *tmp;
	unsigned int len;

	if (ctx->bufcnt)
		n++;

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg) {
		ctx->error = true;
		return -ENOMEM;
	}

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;

	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
	}

	while (sg && skip >= sg->length) {
		skip -= sg->length;
		sg = sg_next(sg);
	}

	while (sg && new_len) {
		len = sg->length - skip;
		if (new_len < len)
			len = new_len;

		new_len -= len;
		sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
		skip = 0;
		if (new_len <= 0)
			sg_mark_end(tmp);

		tmp = sg_next(tmp);
		ctx->sg_len++;
		sg = sg_next(sg);
	}

	set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);

	return 0;
}

/**
 * s5p_hash_prepare_sgs() - prepare sg for processing
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 * @final: final flag
 *
 * Check two conditions: (1) if buffers in sg have block-aligned data, and
 * (2) if the sg table has properly aligned elements (list_ok). If one of
 * these checks fails, then either (1) allocate a new buffer for the data
 * with s5p_hash_copy_sgs(), copy the data into this buffer and prepare the
 * request in sgl, or (2) allocate a new sg table and prepare its elements.
 *
 * For digest or finup all conditions can be good, and we may not need any
 * fixes.
 */
static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
				struct scatterlist *sg,
				unsigned int new_len, bool final)
{
	unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
	bool aligned = true, list_ok = true;
	struct scatterlist *sg_tmp = sg;

	if (!sg || !sg->length || !new_len)
		return 0;

	if (skip || !final)
		list_ok = false;

	while (nbytes > 0 && sg_tmp) {
		n++;
		if (skip >= sg_tmp->length) {
			skip -= sg_tmp->length;
			if (!sg_tmp->length) {
				aligned = false;
				break;
			}
		} else {
			if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
				aligned = false;
				break;
			}

			if (nbytes < sg_tmp->length - skip) {
				list_ok = false;
				break;
			}

			nbytes -= sg_tmp->length - skip;
			skip = 0;
		}

		sg_tmp = sg_next(sg_tmp);
	}

	if (!aligned)
		return s5p_hash_copy_sgs(ctx, sg, new_len);
	else if (!list_ok)
		return s5p_hash_copy_sg_lists(ctx, sg, new_len);

	/*
	 * Have aligned data from previous operation and/or current
	 * Note: will enter here only if (digest or finup) and aligned
	 */
	if (ctx->bufcnt) {
		ctx->sg_len = n;
		sg_init_table(ctx->sgl, 2);
		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
		sg_chain(ctx->sgl, 2, sg);
		ctx->sg = ctx->sgl;
		ctx->sg_len++;
	} else {
		ctx->sg = sg;
		ctx->sg_len = n;
	}

	return 0;
}

/**
 * s5p_hash_prepare_request() - prepare request for processing
 * @req: AHASH request
 * @update: true if UPDATE op
 *
 * Note 1: we can have update flag _and_ final flag at the same time.
 * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE), or when
 *	   either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN, or
 *	   when we have the final op.
 */
static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	bool final = ctx->finup;
	int xmit_len, hash_later, nbytes;
	int ret;

	if (update)
		nbytes = req->nbytes;
	else
		nbytes = 0;

	ctx->total = nbytes + ctx->bufcnt;
	if (!ctx->total)
		return 0;

	if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
		/* bytes left from previous request, so fill up to BUFLEN */
		int len = BUFLEN - ctx->bufcnt % BUFLEN;

		if (len > nbytes)
			len = nbytes;

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, len, 0);
		ctx->bufcnt += len;
		nbytes -= len;
		ctx->skip = len;
	} else {
		ctx->skip = 0;
	}

	if (ctx->bufcnt)
		memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);

	xmit_len = ctx->total;
	if (final) {
		hash_later = 0;
	} else {
		if (IS_ALIGNED(xmit_len, BUFLEN))
			xmit_len -= BUFLEN;
		else
			xmit_len -= xmit_len & (BUFLEN - 1);

		hash_later = ctx->total - xmit_len;
		/* copy hash_later bytes from end of req->src */
		/* previous bytes are in xmit_buf, so no overwrite */
		scatterwalk_map_and_copy(ctx->buffer, req->src,
					 req->nbytes - hash_later,
					 hash_later, 0);
	}

	if (xmit_len > BUFLEN) {
		ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
					   final);
		if (ret)
			return ret;
	} else {
		/* have buffered data only */
		if (unlikely(!ctx->bufcnt)) {
			/* first update didn't fill up buffer */
			scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
						 0, xmit_len, 0);
		}

		sg_init_table(ctx->sgl, 1);
		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);

		ctx->sg = ctx->sgl;
		ctx->sg_len = 1;
	}

	ctx->bufcnt = hash_later;
	if (!final)
		ctx->total = xmit_len;

	return 0;
}

/**
 * s5p_hash_update_dma_stop() - unmap DMA
 * @dd: secss device
 *
 * Unmap scatterlist ctx->sg.
 */
static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);

	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
	clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
}

/**
 * s5p_hash_finish() - copy calculated digest to crypto layer
 * @req: AHASH request
 */
static void s5p_hash_finish(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;

	if (ctx->digcnt)
		s5p_hash_copy_result(req);

	dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
}

/**
 * s5p_hash_finish_req() - finish request
 * @req: AHASH request
 * @err: error
 */
static void s5p_hash_finish_req(struct ahash_request *req, int err)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;
	unsigned long flags;

	if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
		free_pages((unsigned long)sg_virt(ctx->sg),
			   get_order(ctx->sg->length));

	if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
		kfree(ctx->sg);

	ctx->sg = NULL;
	dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
			    BIT(HASH_FLAGS_SGS_COPIED));

	if (!err && !ctx->error) {
		s5p_hash_read_msg(req);
		if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
			s5p_hash_finish(req);
	} else {
		ctx->error = true;
	}

	spin_lock_irqsave(&dd->hash_lock, flags);
	dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
			    BIT(HASH_FLAGS_DMA_READY) |
			    BIT(HASH_FLAGS_OUTPUT_READY));
	spin_unlock_irqrestore(&dd->hash_lock, flags);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

/**
 * s5p_hash_handle_queue() - handle hash queue
 * @dd: device s5p_aes_dev
 * @req: AHASH request
 *
 * If req != NULL, enqueue it on dd->queue; if FLAGS_BUSY is not set on the
 * device, then process the first request from dd->queue.
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct s5p_hash_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

retry:
	spin_lock_irqsave(&dd->hash_lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->hash_queue, req);

	if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
		spin_unlock_irqrestore(&dd->hash_lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->hash_queue);
	async_req = crypto_dequeue_request(&dd->hash_queue);
	if (async_req)
		set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);

	spin_unlock_irqrestore(&dd->hash_lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->hash_req = req;
	ctx = ahash_request_ctx(req);

	err = s5p_hash_prepare_request(req, ctx->op_update);
	if (err || !ctx->total)
		goto out;

	dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
		ctx->op_update, req->nbytes);

	s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
	if (ctx->digcnt)
		s5p_hash_write_iv(req); /* restore hash IV */

	if (ctx->op_update) { /* HASH_OP_UPDATE */
		err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
		if (err != -EINPROGRESS && ctx->finup && !ctx->error)
			/* no final() after finup() */
			err = s5p_hash_xmit_dma(dd, ctx->total, true);
	} else { /* HASH_OP_FINAL */
		err = s5p_hash_xmit_dma(dd, ctx->total, true);
	}
out:
	if (err != -EINPROGRESS) {
		/* hash_tasklet_cb will not finish it, so do it here */
		s5p_hash_finish_req(req, err);
		req = NULL;

		/*
		 * Execute next request immediately if there is anything
		 * in queue.
		 */
		goto retry;
	}

	return ret;
}

/**
 * s5p_hash_tasklet_cb() - hash tasklet
 * @data: ptr to s5p_aes_dev
 */
static void s5p_hash_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;

	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
		s5p_hash_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
		if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
				       &dd->hash_flags)) {
			s5p_hash_update_dma_stop(dd);
		}

		if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
				       &dd->hash_flags)) {
			/* hash or semi-hash ready */
			clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
			goto finish;
		}
	}

	return;

finish:
	/* finish current request */
	s5p_hash_finish_req(dd->hash_req, 0);

	/* If we are not busy, process next req */
	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
		s5p_hash_handle_queue(dd, NULL);
}

/**
 * s5p_hash_enqueue() - enqueue request
 * @req: AHASH request
 * @op: operation UPDATE (true) or FINAL (false)
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_enqueue(struct ahash_request *req, bool op)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);

	ctx->op_update = op;

	return s5p_hash_handle_queue(tctx->dd, req);
}

/**
 * s5p_hash_update() - process the hash input data
 * @req: AHASH request
 *
 * If the request fits in the buffer, copy it and return immediately;
 * else enqueue it with OP_UPDATE.
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_update(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	if (ctx->bufcnt + req->nbytes <= BUFLEN) {
		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, req->nbytes, 0);
		ctx->bufcnt += req->nbytes;
		return 0;
	}

	return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
}

/**
 * s5p_hash_shash_digest() - calculate shash digest
 * @tfm: crypto transformation
 * @flags: tfm flags
 * @data: input data
 * @len: length of data
 * @out: output buffer
 */
static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
				 const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	return crypto_shash_digest(shash, data, len, out);
}

/**
 * s5p_hash_final_shash() - calculate shash digest
 * @req: AHASH request
 */
static int s5p_hash_final_shash(struct ahash_request *req)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
				     ctx->buffer, ctx->bufcnt, req->result);
}
1553
1554/**
1555 * s5p_hash_final() - close up hash and calculate digest
1556 * @req: AHASH request
1557 *
1558 * Note: in final, req->src does not contain any data, yet req->nbytes can
1559 * be non-zero.
1560 *
1561 * If no input data has been processed yet and the buffered hash data is
1562 * less than BUFLEN (64), then calculate the final hash immediately using
1563 * the SW fallback algorithm.
1564 *
1565 * Otherwise enqueue the current AHASH request with the OP_FINAL operation
1566 * and finalize the hash message in HW. Note that if digcnt!=0 then there
1567 * was a previous update op, so there are always some buffered bytes in
1568 * ctx->buffer, which means that ctx->bufcnt!=0.
1569 *
1570 * Returns:
1571 * 0 if the request has been processed immediately,
1572 * -EINPROGRESS if the operation has been queued for later execution or is set
1573 * to processing by HW,
1574 * -EBUSY if queue is full and request should be resubmitted later,
1575 * other negative values denote an error.
1576 */
1577static int s5p_hash_final(struct ahash_request *req)
1578{
1579 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1580
1581 ctx->finup = true;
1582 if (ctx->error)
1583 return -EINVAL; /* uncompleted hash is not needed */
1584
1585 if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
1586 return s5p_hash_final_shash(req);
1587
1588 return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
1589}
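
/*
 * Illustrative sketch, not part of this driver: how a caller might drive
 * the asynchronous hash path above and handle the -EINPROGRESS/-EBUSY
 * return codes with the generic crypto_wait_req() helper. The function
 * name and the assumption that @data is linearly mapped (suitable for
 * scatterlists and DMA) are hypothetical.
 */
static int example_sha256_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* crypto_wait_req() sleeps until the async completion fires */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}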
1590
1591/**
1592 * s5p_hash_finup() - process last req->src and calculate digest
1593 * @req: AHASH request containing the last update data
1594 *
1595 * Return values: see s5p_hash_final above.
1596 */
1597static int s5p_hash_finup(struct ahash_request *req)
1598{
1599 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1600 int err1, err2;
1601
1602 ctx->finup = true;
1603
1604 err1 = s5p_hash_update(req);
1605 if (err1 == -EINPROGRESS || err1 == -EBUSY)
1606 return err1;
1607
1608	/*
1609	 * final() always has to be called to clean up resources, even if
1610	 * update() failed (the -EINPROGRESS and -EBUSY cases were handled
1611	 * above); it also calculates the digest for small input sizes.
1612	 */
1613 err2 = s5p_hash_final(req);
1614
1615 return err1 ?: err2;
1616}
1617
1618/**
1619 * s5p_hash_init() - initialize AHASH request context
1620 * @req: AHASH request
1621 *
1622 * Init async hash request context.
1623 */
1624static int s5p_hash_init(struct ahash_request *req)
1625{
1626 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1627 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1628 struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
1629
1630 ctx->dd = tctx->dd;
1631 ctx->error = false;
1632 ctx->finup = false;
1633 ctx->bufcnt = 0;
1634 ctx->digcnt = 0;
1635 ctx->total = 0;
1636 ctx->skip = 0;
1637
1638 dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
1639 crypto_ahash_digestsize(tfm));
1640
1641 switch (crypto_ahash_digestsize(tfm)) {
1642 case MD5_DIGEST_SIZE:
1643 ctx->engine = SSS_HASH_ENGINE_MD5;
1644 ctx->nregs = HASH_MD5_MAX_REG;
1645 break;
1646 case SHA1_DIGEST_SIZE:
1647 ctx->engine = SSS_HASH_ENGINE_SHA1;
1648 ctx->nregs = HASH_SHA1_MAX_REG;
1649 break;
1650 case SHA256_DIGEST_SIZE:
1651 ctx->engine = SSS_HASH_ENGINE_SHA256;
1652 ctx->nregs = HASH_SHA256_MAX_REG;
1653 break;
1654 default:
1655 ctx->error = true;
1656 return -EINVAL;
1657 }
1658
1659 return 0;
1660}
1661
1662/**
1663 * s5p_hash_digest() - calculate digest from req->src
1664 * @req: AHASH request
1665 *
1666 * Return values: see s5p_hash_final above.
1667 */
1668static int s5p_hash_digest(struct ahash_request *req)
1669{
1670 return s5p_hash_init(req) ?: s5p_hash_finup(req);
1671}
1672
1673/**
1674 * s5p_hash_cra_init_alg() - init crypto alg transformation
1675 * @tfm: crypto transformation
1676 */
1677static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
1678{
1679 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
1680 const char *alg_name = crypto_tfm_alg_name(tfm);
1681
1682 tctx->dd = s5p_dev;
1683 /* Allocate a fallback and abort if it failed. */
1684 tctx->fallback = crypto_alloc_shash(alg_name, 0,
1685 CRYPTO_ALG_NEED_FALLBACK);
1686 if (IS_ERR(tctx->fallback)) {
1687		pr_err("fallback alloc failed for '%s'\n", alg_name);
1688 return PTR_ERR(tctx->fallback);
1689 }
1690
1691 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1692 sizeof(struct s5p_hash_reqctx) + BUFLEN);
1693
1694 return 0;
1695}
1696
1697/**
1698 * s5p_hash_cra_init() - init crypto tfm
1699 * @tfm: crypto transformation
1700 */
1701static int s5p_hash_cra_init(struct crypto_tfm *tfm)
1702{
1703 return s5p_hash_cra_init_alg(tfm);
1704}
1705
1706/**
1707 * s5p_hash_cra_exit() - exit crypto tfm
1708 * @tfm: crypto transformation
1709 *
1710 * Free the allocated fallback.
1711 */
1712static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
1713{
1714 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
1715
1716 crypto_free_shash(tctx->fallback);
1717 tctx->fallback = NULL;
1718}
1719
1720/**
1721 * s5p_hash_export() - export hash state
1722 * @req: AHASH request
1723 * @out: buffer for exported state
1724 */
1725static int s5p_hash_export(struct ahash_request *req, void *out)
1726{
Krzysztof Kozlowski6584eac2018-03-01 21:50:13 +01001727 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
Kamil Koniecznyc2afad62017-10-25 17:27:35 +02001728
1729 memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
1730
1731 return 0;
1732}
1733
1734/**
1735 * s5p_hash_import() - import hash state
1736 * @req: AHASH request
1737 * @in: buffer with the state to be imported
1738 */
1739static int s5p_hash_import(struct ahash_request *req, const void *in)
1740{
1741 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1742 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1743 struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
1744 const struct s5p_hash_reqctx *ctx_in = in;
1745
1746 memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
1747 if (ctx_in->bufcnt > BUFLEN) {
1748 ctx->error = true;
1749 return -EINVAL;
1750 }
1751
1752 ctx->dd = tctx->dd;
1753 ctx->error = false;
1754
1755 return 0;
1756}
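
/*
 * Illustrative sketch, not part of this driver: saving and restoring a
 * partial hash state through the export/import hooks above. The state
 * buffer must hold crypto_ahash_statesize() bytes, which for these
 * algorithms is sizeof(struct s5p_hash_reqctx) + BUFLEN. The function
 * name is hypothetical.
 */
static int example_save_restore_state(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	u8 state[sizeof(struct s5p_hash_reqctx) + BUFLEN];
	int err;

	if (WARN_ON(crypto_ahash_statesize(tfm) > sizeof(state)))
		return -EOVERFLOW;

	err = crypto_ahash_export(req, state);	/* calls s5p_hash_export() */
	if (err)
		return err;

	return crypto_ahash_import(req, state);	/* calls s5p_hash_import() */
}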
1757
1758static struct ahash_alg algs_sha1_md5_sha256[] = {
1759{
1760 .init = s5p_hash_init,
1761 .update = s5p_hash_update,
1762 .final = s5p_hash_final,
1763 .finup = s5p_hash_finup,
1764 .digest = s5p_hash_digest,
1765 .export = s5p_hash_export,
1766 .import = s5p_hash_import,
1767 .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
1768 .halg.digestsize = SHA1_DIGEST_SIZE,
1769 .halg.base = {
1770 .cra_name = "sha1",
1771 .cra_driver_name = "exynos-sha1",
1772 .cra_priority = 100,
Eric Biggers6a38f622018-06-30 15:16:12 -07001773 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
Kamil Koniecznyc2afad62017-10-25 17:27:35 +02001774 CRYPTO_ALG_ASYNC |
1775 CRYPTO_ALG_NEED_FALLBACK,
1776 .cra_blocksize = HASH_BLOCK_SIZE,
1777 .cra_ctxsize = sizeof(struct s5p_hash_ctx),
1778 .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
1779 .cra_module = THIS_MODULE,
1780 .cra_init = s5p_hash_cra_init,
1781 .cra_exit = s5p_hash_cra_exit,
1782 }
1783},
1784{
1785 .init = s5p_hash_init,
1786 .update = s5p_hash_update,
1787 .final = s5p_hash_final,
1788 .finup = s5p_hash_finup,
1789 .digest = s5p_hash_digest,
1790 .export = s5p_hash_export,
1791 .import = s5p_hash_import,
1792 .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
1793 .halg.digestsize = MD5_DIGEST_SIZE,
1794 .halg.base = {
1795 .cra_name = "md5",
1796 .cra_driver_name = "exynos-md5",
1797 .cra_priority = 100,
Eric Biggers6a38f622018-06-30 15:16:12 -07001798 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
Kamil Koniecznyc2afad62017-10-25 17:27:35 +02001799 CRYPTO_ALG_ASYNC |
1800 CRYPTO_ALG_NEED_FALLBACK,
1801 .cra_blocksize = HASH_BLOCK_SIZE,
1802 .cra_ctxsize = sizeof(struct s5p_hash_ctx),
1803 .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
1804 .cra_module = THIS_MODULE,
1805 .cra_init = s5p_hash_cra_init,
1806 .cra_exit = s5p_hash_cra_exit,
1807 }
1808},
1809{
1810 .init = s5p_hash_init,
1811 .update = s5p_hash_update,
1812 .final = s5p_hash_final,
1813 .finup = s5p_hash_finup,
1814 .digest = s5p_hash_digest,
1815 .export = s5p_hash_export,
1816 .import = s5p_hash_import,
1817 .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
1818 .halg.digestsize = SHA256_DIGEST_SIZE,
1819 .halg.base = {
1820 .cra_name = "sha256",
1821 .cra_driver_name = "exynos-sha256",
1822 .cra_priority = 100,
Eric Biggers6a38f622018-06-30 15:16:12 -07001823 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
Kamil Koniecznyc2afad62017-10-25 17:27:35 +02001824 CRYPTO_ALG_ASYNC |
1825 CRYPTO_ALG_NEED_FALLBACK,
1826 .cra_blocksize = HASH_BLOCK_SIZE,
1827 .cra_ctxsize = sizeof(struct s5p_hash_ctx),
1828 .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
1829 .cra_module = THIS_MODULE,
1830 .cra_init = s5p_hash_cra_init,
1831 .cra_exit = s5p_hash_cra_exit,
1832 }
1833}
1834
1835};
1836
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001837static void s5p_set_aes(struct s5p_aes_dev *dev,
Christoph Manszewskicdf640a2018-09-17 17:09:30 +02001838 const u8 *key, const u8 *iv, const u8 *ctr,
Krzysztof Kozlowski6584eac2018-03-01 21:50:13 +01001839 unsigned int keylen)
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001840{
1841 void __iomem *keystart;
1842
Naveen Krishna Chatradhi8f9702a2014-05-08 21:58:15 +08001843 if (iv)
Krzysztof Kozlowskief5c73b2019-02-19 13:01:03 +01001844 memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv,
1845 AES_BLOCK_SIZE);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001846
Christoph Manszewskicdf640a2018-09-17 17:09:30 +02001847 if (ctr)
Krzysztof Kozlowskief5c73b2019-02-19 13:01:03 +01001848 memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr,
1849 AES_BLOCK_SIZE);
Christoph Manszewskicdf640a2018-09-17 17:09:30 +02001850
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001851 if (keylen == AES_KEYSIZE_256)
Naveen Krishna Chatradhi89245102014-05-08 21:58:14 +08001852 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001853 else if (keylen == AES_KEYSIZE_192)
Naveen Krishna Chatradhi89245102014-05-08 21:58:14 +08001854 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001855 else
Naveen Krishna Chatradhi89245102014-05-08 21:58:14 +08001856 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001857
Krzysztof Kozłowski1e3012d2016-01-11 20:45:51 +09001858	memcpy_toio(keystart, key, keylen);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001859}
1860
Krzysztof Kozlowski9e4a1102016-03-22 10:58:24 +09001861static bool s5p_is_sg_aligned(struct scatterlist *sg)
1862{
1863 while (sg) {
Marek Szyprowskid1497972016-04-26 09:29:26 +02001864 if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
Krzysztof Kozlowski9e4a1102016-03-22 10:58:24 +09001865 return false;
1866 sg = sg_next(sg);
1867 }
1868
1869 return true;
1870}
1871
1872static int s5p_set_indata_start(struct s5p_aes_dev *dev,
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01001873 struct skcipher_request *req)
Krzysztof Kozlowski9e4a1102016-03-22 10:58:24 +09001874{
1875 struct scatterlist *sg;
1876 int err;
1877
1878 dev->sg_src_cpy = NULL;
1879 sg = req->src;
1880 if (!s5p_is_sg_aligned(sg)) {
1881 dev_dbg(dev->dev,
1882 "At least one unaligned source scatter list, making a copy\n");
1883 err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
1884 if (err)
1885 return err;
1886
1887 sg = dev->sg_src_cpy;
1888 }
1889
1890 err = s5p_set_indata(dev, sg);
1891 if (err) {
1892 s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
1893 return err;
1894 }
1895
1896 return 0;
1897}
1898
1899static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01001900 struct skcipher_request *req)
Krzysztof Kozlowski9e4a1102016-03-22 10:58:24 +09001901{
1902 struct scatterlist *sg;
1903 int err;
1904
1905 dev->sg_dst_cpy = NULL;
1906 sg = req->dst;
1907 if (!s5p_is_sg_aligned(sg)) {
1908 dev_dbg(dev->dev,
1909 "At least one unaligned dest scatter list, making a copy\n");
1910 err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
1911 if (err)
1912 return err;
1913
1914 sg = dev->sg_dst_cpy;
1915 }
1916
1917 err = s5p_set_outdata(dev, sg);
1918 if (err) {
1919 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
1920 return err;
1921 }
1922
1923 return 0;
1924}
1925
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001926static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
1927{
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01001928 struct skcipher_request *req = dev->req;
Christoph Manszewskib1b44162018-09-17 17:09:29 +02001929 u32 aes_control;
Krzysztof Kozlowski5318c532016-05-27 13:49:40 +02001930 unsigned long flags;
1931 int err;
Christoph Manszewskicdf640a2018-09-17 17:09:30 +02001932 u8 *iv, *ctr;
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001933
Christoph Manszewskicdf640a2018-09-17 17:09:30 +02001934	/* This sets bits [13:12] to 00, which selects the 128-bit counter */
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001935 aes_control = SSS_AES_KEY_CHANGE_MODE;
1936 if (mode & FLAGS_AES_DECRYPT)
1937 aes_control |= SSS_AES_MODE_DECRYPT;
1938
Kamil Koniecznyc927b082018-02-07 16:52:09 +01001939 if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001940 aes_control |= SSS_AES_CHAIN_MODE_CBC;
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01001941 iv = req->iv;
Christoph Manszewskicdf640a2018-09-17 17:09:30 +02001942 ctr = NULL;
Kamil Koniecznyc927b082018-02-07 16:52:09 +01001943 } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001944 aes_control |= SSS_AES_CHAIN_MODE_CTR;
Christoph Manszewskicdf640a2018-09-17 17:09:30 +02001945 iv = NULL;
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01001946 ctr = req->iv;
Kamil Koniecznyc927b082018-02-07 16:52:09 +01001947 } else {
1948 iv = NULL; /* AES_ECB */
Christoph Manszewskicdf640a2018-09-17 17:09:30 +02001949 ctr = NULL;
Kamil Koniecznyc927b082018-02-07 16:52:09 +01001950 }
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001951
1952 if (dev->ctx->keylen == AES_KEYSIZE_192)
1953 aes_control |= SSS_AES_KEY_SIZE_192;
1954 else if (dev->ctx->keylen == AES_KEYSIZE_256)
1955 aes_control |= SSS_AES_KEY_SIZE_256;
1956
1957 aes_control |= SSS_AES_FIFO_MODE;
1958
1959	/* alternatively, byte swapping could be done on the DMA side */
1960 aes_control |= SSS_AES_BYTESWAP_DI
1961 | SSS_AES_BYTESWAP_DO
1962 | SSS_AES_BYTESWAP_IV
1963 | SSS_AES_BYTESWAP_KEY
1964 | SSS_AES_BYTESWAP_CNT;
1965
1966 spin_lock_irqsave(&dev->lock, flags);
1967
1968 SSS_WRITE(dev, FCINTENCLR,
1969 SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
1970 SSS_WRITE(dev, FCFIFOCTRL, 0x00);
1971
Krzysztof Kozlowski9e4a1102016-03-22 10:58:24 +09001972 err = s5p_set_indata_start(dev, req);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001973 if (err)
1974 goto indata_error;
1975
Krzysztof Kozlowski9e4a1102016-03-22 10:58:24 +09001976 err = s5p_set_outdata_start(dev, req);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001977 if (err)
1978 goto outdata_error;
1979
Naveen Krishna Chatradhi89245102014-05-08 21:58:14 +08001980 SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
Christoph Manszewskicdf640a2018-09-17 17:09:30 +02001981 s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001982
Krzysztof Kozlowski9e4a1102016-03-22 10:58:24 +09001983 s5p_set_dma_indata(dev, dev->sg_src);
1984 s5p_set_dma_outdata(dev, dev->sg_dst);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001985
1986 SSS_WRITE(dev, FCINTENSET,
1987 SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);
1988
1989 spin_unlock_irqrestore(&dev->lock, flags);
1990
1991 return;
1992
Krzysztof Kozlowski119c3ab2016-03-22 10:58:23 +09001993outdata_error:
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001994 s5p_unset_indata(dev);
1995
Krzysztof Kozlowski119c3ab2016-03-22 10:58:23 +09001996indata_error:
Krzysztof Kozlowski28b62b12017-03-08 23:14:20 +02001997 s5p_sg_done(dev);
Krzysztof Kozlowski42d5c172017-03-17 16:49:19 +02001998 dev->busy = false;
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08001999 spin_unlock_irqrestore(&dev->lock, flags);
Christoph Manszewski5842cd42018-09-17 17:09:27 +02002000 s5p_aes_complete(req, err);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002001}
2002
2003static void s5p_tasklet_cb(unsigned long data)
2004{
2005 struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
2006 struct crypto_async_request *async_req, *backlog;
2007 struct s5p_aes_reqctx *reqctx;
2008 unsigned long flags;
2009
2010 spin_lock_irqsave(&dev->lock, flags);
2011 backlog = crypto_get_backlog(&dev->queue);
2012 async_req = crypto_dequeue_request(&dev->queue);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002013
Naveen Krishna Chatradhidc5e3f12014-05-08 21:58:15 +08002014 if (!async_req) {
2015 dev->busy = false;
2016 spin_unlock_irqrestore(&dev->lock, flags);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002017 return;
Naveen Krishna Chatradhidc5e3f12014-05-08 21:58:15 +08002018 }
2019 spin_unlock_irqrestore(&dev->lock, flags);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002020
2021 if (backlog)
2022 backlog->complete(backlog, -EINPROGRESS);
2023
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002024 dev->req = skcipher_request_cast(async_req);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002025 dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002026 reqctx = skcipher_request_ctx(dev->req);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002027
2028 s5p_aes_crypt_start(dev, reqctx->mode);
2029}
2030
2031static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002032 struct skcipher_request *req)
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002033{
2034 unsigned long flags;
2035 int err;
2036
2037 spin_lock_irqsave(&dev->lock, flags);
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002038 err = crypto_enqueue_request(&dev->queue, &req->base);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002039 if (dev->busy) {
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002040 spin_unlock_irqrestore(&dev->lock, flags);
Christoph Manszewskib1b44162018-09-17 17:09:29 +02002041 return err;
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002042 }
2043 dev->busy = true;
2044
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002045 spin_unlock_irqrestore(&dev->lock, flags);
2046
2047 tasklet_schedule(&dev->tasklet);
2048
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002049 return err;
2050}
2051
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002052static int s5p_aes_crypt(struct skcipher_request *req, unsigned long mode)
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002053{
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002054 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
2055 struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
2056 struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
Krzysztof Kozlowski5318c532016-05-27 13:49:40 +02002057 struct s5p_aes_dev *dev = ctx->dev;
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002058
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002059 if (!req->cryptlen)
Ard Biesheuvel84a0b002019-08-19 17:22:25 +03002060 return 0;
2061
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002062 if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE) &&
Christoph Manszewskicdf640a2018-09-17 17:09:30 +02002063 ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
Ard Biesheuvel84a0b002019-08-19 17:22:25 +03002064		dev_dbg(dev->dev, "request size is not an exact number of AES blocks\n");
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002065 return -EINVAL;
2066 }
2067
2068 reqctx->mode = mode;
2069
2070 return s5p_aes_handle_req(dev, req);
2071}
2072
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002073static int s5p_aes_setkey(struct crypto_skcipher *cipher,
Christoph Manszewskib1b44162018-09-17 17:09:29 +02002074 const u8 *key, unsigned int keylen)
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002075{
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002076 struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002077 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
2078
2079 if (keylen != AES_KEYSIZE_128 &&
2080 keylen != AES_KEYSIZE_192 &&
2081 keylen != AES_KEYSIZE_256)
2082 return -EINVAL;
2083
2084 memcpy(ctx->aes_key, key, keylen);
2085 ctx->keylen = keylen;
2086
2087 return 0;
2088}
2089
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002090static int s5p_aes_ecb_encrypt(struct skcipher_request *req)
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002091{
2092 return s5p_aes_crypt(req, 0);
2093}
2094
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002095static int s5p_aes_ecb_decrypt(struct skcipher_request *req)
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002096{
2097 return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
2098}
2099
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002100static int s5p_aes_cbc_encrypt(struct skcipher_request *req)
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002101{
2102 return s5p_aes_crypt(req, FLAGS_AES_CBC);
2103}
2104
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002105static int s5p_aes_cbc_decrypt(struct skcipher_request *req)
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002106{
2107 return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
2108}
2109
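/*
 * In CTR mode encryption and decryption are the same operation: the
 * block cipher generates a keystream which is XORed with the data, so
 * the single handler below serves both callbacks in the algs[] table.
 */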
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002110static int s5p_aes_ctr_crypt(struct skcipher_request *req)
Christoph Manszewskicdf640a2018-09-17 17:09:30 +02002111{
2112 return s5p_aes_crypt(req, FLAGS_AES_CTR);
2113}
2114
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002115static int s5p_aes_init_tfm(struct crypto_skcipher *tfm)
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002116{
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002117 struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002118
2119 ctx->dev = s5p_dev;
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002120 crypto_skcipher_set_reqsize(tfm, sizeof(struct s5p_aes_reqctx));
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002121
2122 return 0;
2123}
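
/*
 * Illustrative sketch, not part of this driver: one-shot in-place CBC
 * encryption through the skcipher API implemented by the algs[] table
 * below. Per s5p_aes_crypt(), @len must be a multiple of AES_BLOCK_SIZE
 * for CBC. The function name and buffer handling are hypothetical.
 */
static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
				   u8 *iv, u8 *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* waits for the tasklet/IRQ path above to complete the request */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}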
2124
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002125static struct skcipher_alg algs[] = {
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002126 {
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002127 .base.cra_name = "ecb(aes)",
2128 .base.cra_driver_name = "ecb-aes-s5p",
2129 .base.cra_priority = 100,
2130 .base.cra_flags = CRYPTO_ALG_ASYNC |
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01002131 CRYPTO_ALG_KERN_DRIVER_ONLY,
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002132 .base.cra_blocksize = AES_BLOCK_SIZE,
2133 .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
2134 .base.cra_alignmask = 0x0f,
2135 .base.cra_module = THIS_MODULE,
2136
2137 .min_keysize = AES_MIN_KEY_SIZE,
2138 .max_keysize = AES_MAX_KEY_SIZE,
2139 .setkey = s5p_aes_setkey,
2140 .encrypt = s5p_aes_ecb_encrypt,
2141 .decrypt = s5p_aes_ecb_decrypt,
2142 .init = s5p_aes_init_tfm,
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002143 },
2144 {
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002145 .base.cra_name = "cbc(aes)",
2146 .base.cra_driver_name = "cbc-aes-s5p",
2147 .base.cra_priority = 100,
2148 .base.cra_flags = CRYPTO_ALG_ASYNC |
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01002149 CRYPTO_ALG_KERN_DRIVER_ONLY,
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002150 .base.cra_blocksize = AES_BLOCK_SIZE,
2151 .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
2152 .base.cra_alignmask = 0x0f,
2153 .base.cra_module = THIS_MODULE,
2154
2155 .min_keysize = AES_MIN_KEY_SIZE,
2156 .max_keysize = AES_MAX_KEY_SIZE,
2157 .ivsize = AES_BLOCK_SIZE,
2158 .setkey = s5p_aes_setkey,
2159 .encrypt = s5p_aes_cbc_encrypt,
2160 .decrypt = s5p_aes_cbc_decrypt,
2161 .init = s5p_aes_init_tfm,
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002162 },
Christoph Manszewskicdf640a2018-09-17 17:09:30 +02002163 {
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002164 .base.cra_name = "ctr(aes)",
2165 .base.cra_driver_name = "ctr-aes-s5p",
2166 .base.cra_priority = 100,
2167 .base.cra_flags = CRYPTO_ALG_ASYNC |
Christoph Manszewskicdf640a2018-09-17 17:09:30 +02002168 CRYPTO_ALG_KERN_DRIVER_ONLY,
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002169 .base.cra_blocksize = 1,
2170 .base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
2171 .base.cra_alignmask = 0x0f,
2172 .base.cra_module = THIS_MODULE,
2173
2174 .min_keysize = AES_MIN_KEY_SIZE,
2175 .max_keysize = AES_MAX_KEY_SIZE,
2176 .ivsize = AES_BLOCK_SIZE,
2177 .setkey = s5p_aes_setkey,
2178 .encrypt = s5p_aes_ctr_crypt,
2179 .decrypt = s5p_aes_ctr_crypt,
2180 .init = s5p_aes_init_tfm,
Christoph Manszewskicdf640a2018-09-17 17:09:30 +02002181 },
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002182};
2183
2184static int s5p_aes_probe(struct platform_device *pdev)
2185{
Krzysztof Kozlowski5318c532016-05-27 13:49:40 +02002186 struct device *dev = &pdev->dev;
2187 int i, j, err = -ENODEV;
Krzysztof Kozlowski6584eac2018-03-01 21:50:13 +01002188 const struct samsung_aes_variant *variant;
Krzysztof Kozlowski5318c532016-05-27 13:49:40 +02002189 struct s5p_aes_dev *pdata;
2190 struct resource *res;
Kamil Koniecznyc2afad62017-10-25 17:27:35 +02002191 unsigned int hash_i;
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002192
2193 if (s5p_dev)
2194 return -EEXIST;
2195
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002196 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2197 if (!pdata)
2198 return -ENOMEM;
2199
Naveen Krishna Chatradhi89245102014-05-08 21:58:14 +08002200 variant = find_s5p_sss_version(pdev);
Kamil Koniecznyc2afad62017-10-25 17:27:35 +02002201 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2202
2203 /*
2204	 * Note: HASH and PRNG use the same registers in secss; avoid
2205	 * overwriting each other. This will drop HASH when CONFIG_EXYNOS_RNG
2206	 * is enabled. We need a larger resource size for the HASH registers
2207	 * in secss; the current one describes only AES/DES.
2208 */
2209 if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
2210 if (variant == &exynos_aes_data) {
2211 res->end += 0x300;
2212 pdata->use_hash = true;
2213 }
2214 }
2215
2216 pdata->res = res;
2217 pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
2218 if (IS_ERR(pdata->ioaddr)) {
2219 if (!pdata->use_hash)
2220 return PTR_ERR(pdata->ioaddr);
2221 /* try AES without HASH */
2222 res->end -= 0x300;
2223 pdata->use_hash = false;
2224 pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
2225 if (IS_ERR(pdata->ioaddr))
2226 return PTR_ERR(pdata->ioaddr);
2227 }
Naveen Krishna Chatradhi89245102014-05-08 21:58:14 +08002228
Kamil Konieczny0918f182019-02-22 13:21:44 +01002229 pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002230 if (IS_ERR(pdata->clk)) {
Kamil Konieczny0918f182019-02-22 13:21:44 +01002231 dev_err(dev, "failed to find secss clock %s\n",
2232 variant->clk_names[0]);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002233 return -ENOENT;
2234 }
2235
Naveen Krishna Chatradhic1eb7ef2014-05-08 21:58:15 +08002236 err = clk_prepare_enable(pdata->clk);
2237 if (err < 0) {
Kamil Konieczny0918f182019-02-22 13:21:44 +01002238 dev_err(dev, "Enabling clock %s failed, err %d\n",
2239 variant->clk_names[0], err);
Naveen Krishna Chatradhic1eb7ef2014-05-08 21:58:15 +08002240 return err;
2241 }
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002242
Kamil Konieczny0918f182019-02-22 13:21:44 +01002243 if (variant->clk_names[1]) {
2244 pdata->pclk = devm_clk_get(dev, variant->clk_names[1]);
2245 if (IS_ERR(pdata->pclk)) {
2246 dev_err(dev, "failed to find clock %s\n",
2247 variant->clk_names[1]);
2248 err = -ENOENT;
2249 goto err_clk;
2250 }
2251
2252 err = clk_prepare_enable(pdata->pclk);
2253 if (err < 0) {
2254 dev_err(dev, "Enabling clock %s failed, err %d\n",
2255 variant->clk_names[0], err);
2256 goto err_clk;
2257 }
2258 } else {
2259 pdata->pclk = NULL;
2260 }
2261
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002262 spin_lock_init(&pdata->lock);
Kamil Koniecznyc2afad62017-10-25 17:27:35 +02002263 spin_lock_init(&pdata->hash_lock);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002264
Naveen Krishna Chatradhi89245102014-05-08 21:58:14 +08002265 pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
Kamil Koniecznyc2afad62017-10-25 17:27:35 +02002266 pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;
Naveen Krishna Chatradhi89245102014-05-08 21:58:14 +08002267
Naveen Krishna Chatradhi96fc70b2014-05-08 21:58:12 +08002268 pdata->irq_fc = platform_get_irq(pdev, 0);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002269 if (pdata->irq_fc < 0) {
2270 err = pdata->irq_fc;
2271 dev_warn(dev, "feed control interrupt is not available.\n");
2272 goto err_irq;
2273 }
Krzysztof Kozlowski07de4bc2017-03-05 19:14:07 +02002274 err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
2275 s5p_aes_interrupt, IRQF_ONESHOT,
2276 pdev->name, pdev);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002277 if (err < 0) {
2278		dev_warn(dev, "requesting feed control interrupt failed.\n");
2279 goto err_irq;
2280 }
2281
Naveen Krishna Chatradhidc5e3f12014-05-08 21:58:15 +08002282 pdata->busy = false;
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002283 pdata->dev = dev;
2284 platform_set_drvdata(pdev, pdata);
2285 s5p_dev = pdata;
2286
2287 tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
2288 crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
2289
2290 for (i = 0; i < ARRAY_SIZE(algs); i++) {
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002291 err = crypto_register_skcipher(&algs[i]);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002292 if (err)
2293 goto err_algs;
2294 }
2295
Kamil Koniecznyc2afad62017-10-25 17:27:35 +02002296 if (pdata->use_hash) {
2297 tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
2298 (unsigned long)pdata);
2299 crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);
2300
2301 for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
2302 hash_i++) {
2303 struct ahash_alg *alg;
2304
2305 alg = &algs_sha1_md5_sha256[hash_i];
2306 err = crypto_register_ahash(alg);
2307 if (err) {
2308 dev_err(dev, "can't register '%s': %d\n",
2309 alg->halg.base.cra_driver_name, err);
2310 goto err_hash;
2311 }
2312 }
2313 }
2314
Krzysztof Kozłowski313becd2016-01-11 20:45:50 +09002315	dev_info(dev, "s5p-sss driver registered\n");
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002316
2317 return 0;
2318
Kamil Koniecznyc2afad62017-10-25 17:27:35 +02002319err_hash:
2320 for (j = hash_i - 1; j >= 0; j--)
2321 crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);
2322
2323 tasklet_kill(&pdata->hash_tasklet);
2324 res->end -= 0x300;
2325
Krzysztof Kozlowski119c3ab2016-03-22 10:58:23 +09002326err_algs:
Kamil Koniecznyc2afad62017-10-25 17:27:35 +02002327 if (i < ARRAY_SIZE(algs))
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002328 dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name,
Kamil Koniecznyc2afad62017-10-25 17:27:35 +02002329 err);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002330
2331 for (j = 0; j < i; j++)
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002332 crypto_unregister_skcipher(&algs[j]);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002333
2334 tasklet_kill(&pdata->tasklet);
2335
Krzysztof Kozlowski119c3ab2016-03-22 10:58:23 +09002336err_irq:
Kamil Konieczny0918f182019-02-22 13:21:44 +01002337 if (pdata->pclk)
2338 clk_disable_unprepare(pdata->pclk);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002339
Kamil Konieczny0918f182019-02-22 13:21:44 +01002340err_clk:
2341 clk_disable_unprepare(pdata->clk);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002342 s5p_dev = NULL;
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002343
2344 return err;
2345}
2346
2347static int s5p_aes_remove(struct platform_device *pdev)
2348{
2349 struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
2350 int i;
2351
2352 if (!pdata)
2353 return -ENODEV;
2354
2355 for (i = 0; i < ARRAY_SIZE(algs); i++)
Ard Biesheuvele6b98ce2019-11-09 18:09:32 +01002356 crypto_unregister_skcipher(&algs[i]);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002357
2358 tasklet_kill(&pdata->tasklet);
Kamil Koniecznyc2afad62017-10-25 17:27:35 +02002359 if (pdata->use_hash) {
2360 for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
2361 crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);
2362
2363 pdata->res->end -= 0x300;
2364 tasklet_kill(&pdata->hash_tasklet);
2365 pdata->use_hash = false;
2366 }
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002367
Kamil Konieczny0918f182019-02-22 13:21:44 +01002368 if (pdata->pclk)
2369 clk_disable_unprepare(pdata->pclk);
2370
Naveen Krishna Chatradhic1eb7ef2014-05-08 21:58:15 +08002371 clk_disable_unprepare(pdata->clk);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002372 s5p_dev = NULL;
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002373
2374 return 0;
2375}
2376
2377static struct platform_driver s5p_aes_crypto = {
2378 .probe = s5p_aes_probe,
2379 .remove = s5p_aes_remove,
2380 .driver = {
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002381 .name = "s5p-secss",
Naveen Krishna Chatradhi6b9f16e2014-05-08 21:58:13 +08002382 .of_match_table = s5p_sss_dt_match,
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002383 },
2384};
2385
Axel Lin741e8c22011-11-26 21:26:19 +08002386module_platform_driver(s5p_aes_crypto);
Vladimir Zapolskiya49e4902011-04-08 20:40:51 +08002387
2388MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
2389MODULE_LICENSE("GPL v2");
2390MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
Kamil Koniecznyc2afad62017-10-25 17:27:35 +02002391MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");