// SPDX-License-Identifier: GPL-2.0
//
// Cryptographic API.
//
// Support for Samsung S5PV210 and Exynos HW acceleration.
//
// Copyright (C) 2011 NetUP Inc. All rights reserved.
// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
//
// Hash part based on omap-sham.c driver.

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/ctr.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>

#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define _SBF(s, v)			((v) << (s))
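/* e.g. _SBF(4, 0x02) evaluates to 0x20, the SSS_AES_KEY_SIZE_256 field value defined below */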

/* Feed control registers */
#define SSS_REG_FCINTSTAT		0x0000
#define SSS_FCINTSTAT_HPARTINT		BIT(7)
#define SSS_FCINTSTAT_HDONEINT		BIT(5)
#define SSS_FCINTSTAT_BRDMAINT		BIT(3)
#define SSS_FCINTSTAT_BTDMAINT		BIT(2)
#define SSS_FCINTSTAT_HRDMAINT		BIT(1)
#define SSS_FCINTSTAT_PKDMAINT		BIT(0)

#define SSS_REG_FCINTENSET		0x0004
#define SSS_FCINTENSET_HPARTINTENSET	BIT(7)
#define SSS_FCINTENSET_HDONEINTENSET	BIT(5)
#define SSS_FCINTENSET_BRDMAINTENSET	BIT(3)
#define SSS_FCINTENSET_BTDMAINTENSET	BIT(2)
#define SSS_FCINTENSET_HRDMAINTENSET	BIT(1)
#define SSS_FCINTENSET_PKDMAINTENSET	BIT(0)

#define SSS_REG_FCINTENCLR		0x0008
#define SSS_FCINTENCLR_HPARTINTENCLR	BIT(7)
#define SSS_FCINTENCLR_HDONEINTENCLR	BIT(5)
#define SSS_FCINTENCLR_BRDMAINTENCLR	BIT(3)
#define SSS_FCINTENCLR_BTDMAINTENCLR	BIT(2)
#define SSS_FCINTENCLR_HRDMAINTENCLR	BIT(1)
#define SSS_FCINTENCLR_PKDMAINTENCLR	BIT(0)

#define SSS_REG_FCINTPEND		0x000C
#define SSS_FCINTPEND_HPARTINTP		BIT(7)
#define SSS_FCINTPEND_HDONEINTP		BIT(5)
#define SSS_FCINTPEND_BRDMAINTP		BIT(3)
#define SSS_FCINTPEND_BTDMAINTP		BIT(2)
#define SSS_FCINTPEND_HRDMAINTP		BIT(1)
#define SSS_FCINTPEND_PKDMAINTP		BIT(0)

#define SSS_REG_FCFIFOSTAT		0x0010
#define SSS_FCFIFOSTAT_BRFIFOFUL	BIT(7)
#define SSS_FCFIFOSTAT_BRFIFOEMP	BIT(6)
#define SSS_FCFIFOSTAT_BTFIFOFUL	BIT(5)
#define SSS_FCFIFOSTAT_BTFIFOEMP	BIT(4)
#define SSS_FCFIFOSTAT_HRFIFOFUL	BIT(3)
#define SSS_FCFIFOSTAT_HRFIFOEMP	BIT(2)
#define SSS_FCFIFOSTAT_PKFIFOFUL	BIT(1)
#define SSS_FCFIFOSTAT_PKFIFOEMP	BIT(0)

#define SSS_REG_FCFIFOCTRL		0x0014
#define SSS_FCFIFOCTRL_DESSEL		BIT(2)
#define SSS_HASHIN_INDEPENDENT		_SBF(0, 0x00)
#define SSS_HASHIN_CIPHER_INPUT		_SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT	_SBF(0, 0x02)
#define SSS_HASHIN_MASK			_SBF(0, 0x03)

#define SSS_REG_FCBRDMAS		0x0020
#define SSS_REG_FCBRDMAL		0x0024
#define SSS_REG_FCBRDMAC		0x0028
#define SSS_FCBRDMAC_BYTESWAP		BIT(1)
#define SSS_FCBRDMAC_FLUSH		BIT(0)

#define SSS_REG_FCBTDMAS		0x0030
#define SSS_REG_FCBTDMAL		0x0034
#define SSS_REG_FCBTDMAC		0x0038
#define SSS_FCBTDMAC_BYTESWAP		BIT(1)
#define SSS_FCBTDMAC_FLUSH		BIT(0)

#define SSS_REG_FCHRDMAS		0x0040
#define SSS_REG_FCHRDMAL		0x0044
#define SSS_REG_FCHRDMAC		0x0048
#define SSS_FCHRDMAC_BYTESWAP		BIT(1)
#define SSS_FCHRDMAC_FLUSH		BIT(0)

#define SSS_REG_FCPKDMAS		0x0050
#define SSS_REG_FCPKDMAL		0x0054
#define SSS_REG_FCPKDMAC		0x0058
#define SSS_FCPKDMAC_BYTESWAP		BIT(3)
#define SSS_FCPKDMAC_DESCEND		BIT(2)
#define SSS_FCPKDMAC_TRANSMIT		BIT(1)
#define SSS_FCPKDMAC_FLUSH		BIT(0)

#define SSS_REG_FCPKDMAO		0x005C

/* AES registers */
#define SSS_REG_AES_CONTROL		0x00
#define SSS_AES_BYTESWAP_DI		BIT(11)
#define SSS_AES_BYTESWAP_DO		BIT(10)
#define SSS_AES_BYTESWAP_IV		BIT(9)
#define SSS_AES_BYTESWAP_CNT		BIT(8)
#define SSS_AES_BYTESWAP_KEY		BIT(7)
#define SSS_AES_KEY_CHANGE_MODE		BIT(6)
#define SSS_AES_KEY_SIZE_128		_SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192		_SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256		_SBF(4, 0x02)
#define SSS_AES_FIFO_MODE		BIT(3)
#define SSS_AES_CHAIN_MODE_ECB		_SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC		_SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR		_SBF(1, 0x02)
#define SSS_AES_MODE_DECRYPT		BIT(0)
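
/*
 * Illustrative only (not a line from the driver): a CBC decrypt with a
 * 256-bit key would combine the fields above as
 * SSS_AES_KEY_SIZE_256 | SSS_AES_CHAIN_MODE_CBC | SSS_AES_MODE_DECRYPT.
 */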

#define SSS_REG_AES_STATUS		0x04
#define SSS_AES_BUSY			BIT(2)
#define SSS_AES_INPUT_READY		BIT(1)
#define SSS_AES_OUTPUT_READY		BIT(0)

#define SSS_REG_AES_IN_DATA(s)		(0x10 + ((s) << 2))
#define SSS_REG_AES_OUT_DATA(s)		(0x20 + ((s) << 2))
#define SSS_REG_AES_IV_DATA(s)		(0x30 + ((s) << 2))
#define SSS_REG_AES_CNT_DATA(s)		(0x40 + ((s) << 2))
#define SSS_REG_AES_KEY_DATA(s)		(0x80 + ((s) << 2))

#define SSS_REG(dev, reg)		((dev)->ioaddr + (SSS_REG_##reg))
#define SSS_READ(dev, reg)		__raw_readl(SSS_REG(dev, reg))
#define SSS_WRITE(dev, reg, val)	__raw_writel((val), SSS_REG(dev, reg))
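/* e.g. SSS_READ(dev, FCINTSTAT) expands to __raw_readl((dev)->ioaddr + SSS_REG_FCINTSTAT) */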

#define SSS_AES_REG(dev, reg)		((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val)	__raw_writel((val), \
						SSS_AES_REG(dev, reg))

/* HW engine modes */
#define FLAGS_AES_DECRYPT		BIT(0)
#define FLAGS_AES_MODE_MASK		_SBF(1, 0x03)
#define FLAGS_AES_CBC			_SBF(1, 0x01)
#define FLAGS_AES_CTR			_SBF(1, 0x02)

#define AES_KEY_LEN			16
#define CRYPTO_QUEUE_LEN		1

/* HASH registers */
#define SSS_REG_HASH_CTRL		0x00

#define SSS_HASH_USER_IV_EN		BIT(5)
#define SSS_HASH_INIT_BIT		BIT(4)
#define SSS_HASH_ENGINE_SHA1		_SBF(1, 0x00)
#define SSS_HASH_ENGINE_MD5		_SBF(1, 0x01)
#define SSS_HASH_ENGINE_SHA256		_SBF(1, 0x02)

#define SSS_HASH_ENGINE_MASK		_SBF(1, 0x03)

#define SSS_REG_HASH_CTRL_PAUSE		0x04

#define SSS_HASH_PAUSE			BIT(0)

#define SSS_REG_HASH_CTRL_FIFO		0x08

#define SSS_HASH_FIFO_MODE_DMA		BIT(0)
#define SSS_HASH_FIFO_MODE_CPU		0

#define SSS_REG_HASH_CTRL_SWAP		0x0C

#define SSS_HASH_BYTESWAP_DI		BIT(3)
#define SSS_HASH_BYTESWAP_DO		BIT(2)
#define SSS_HASH_BYTESWAP_IV		BIT(1)
#define SSS_HASH_BYTESWAP_KEY		BIT(0)

#define SSS_REG_HASH_STATUS		0x10

#define SSS_HASH_STATUS_MSG_DONE	BIT(6)
#define SSS_HASH_STATUS_PARTIAL_DONE	BIT(4)
#define SSS_HASH_STATUS_BUFFER_READY	BIT(0)

#define SSS_REG_HASH_MSG_SIZE_LOW	0x20
#define SSS_REG_HASH_MSG_SIZE_HIGH	0x24

#define SSS_REG_HASH_PRE_MSG_SIZE_LOW	0x28
#define SSS_REG_HASH_PRE_MSG_SIZE_HIGH	0x2C

#define SSS_REG_HASH_IV(s)		(0xB0 + ((s) << 2))
#define SSS_REG_HASH_OUT(s)		(0x100 + ((s) << 2))

#define HASH_BLOCK_SIZE			64
#define HASH_REG_SIZEOF			4
#define HASH_MD5_MAX_REG		(MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
#define HASH_SHA1_MAX_REG		(SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
#define HASH_SHA256_MAX_REG		(SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)
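/* e.g. MD5: 16-byte digest / 4 bytes per register = 4 result registers */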

/*
 * HASH bit numbers, used by the device: set in dev->hash_flags with
 * set_bit() and clear_bit(), tested with test_bit() or BIT(). They keep
 * the HASH state BUSY or FREE and signal state from the irq_handler to
 * the hash_tasklet. The SGS bits track memory allocated for scatterlists.
 */
#define HASH_FLAGS_BUSY		0
#define HASH_FLAGS_FINAL	1
#define HASH_FLAGS_DMA_ACTIVE	2
#define HASH_FLAGS_OUTPUT_READY	3
#define HASH_FLAGS_DMA_READY	4
#define HASH_FLAGS_SGS_COPIED	5
#define HASH_FLAGS_SGS_ALLOCED	6
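/* e.g. set_bit(HASH_FLAGS_BUSY, &dev->hash_flags) marks the HASH engine busy */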

/* HASH HW constants */
#define BUFLEN			HASH_BLOCK_SIZE

#define SSS_HASH_DMA_LEN_ALIGN	8
#define SSS_HASH_DMA_ALIGN_MASK	(SSS_HASH_DMA_LEN_ALIGN - 1)

#define SSS_HASH_QUEUE_LENGTH	10

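/* a buffer length is DMA-aligned when (len & SSS_HASH_DMA_ALIGN_MASK) == 0, i.e. a multiple of 8 */
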
/**
 * struct samsung_aes_variant - platform specific SSS driver data
 * @aes_offset: AES register offset from SSS module's base.
 * @hash_offset: HASH register offset from SSS module's base.
 *
 * Specifies platform specific configuration of SSS module.
 * Note: A structure for driver specific platform data is used for future
 * expansion of its usage.
 */
struct samsung_aes_variant {
	unsigned int			aes_offset;
	unsigned int			hash_offset;
};

struct s5p_aes_reqctx {
	unsigned long			mode;
};

struct s5p_aes_ctx {
	struct s5p_aes_dev		*dev;

	u8				aes_key[AES_MAX_KEY_SIZE];
	u8				nonce[CTR_RFC3686_NONCE_SIZE];
	int				keylen;
};

/**
 * struct s5p_aes_dev - Crypto device state container
 * @dev: Associated device
 * @clk: Clock for accessing hardware
 * @ioaddr: Mapped IO memory region
 * @aes_ioaddr: Per-variant offset for AES block IO memory
 * @irq_fc: Feed control interrupt line
 * @req: Crypto request currently handled by the device
 * @ctx: Configuration for currently handled crypto request
 * @sg_src: Scatter list with source data for currently handled block
 *	in device. This is DMA-mapped into device.
 * @sg_dst: Scatter list with destination data for currently handled block
 *	in device. This is DMA-mapped into device.
 * @sg_src_cpy: In case of unaligned access, copied scatter list
 *	with source data.
 * @sg_dst_cpy: In case of unaligned access, copied scatter list
 *	with destination data.
 * @tasklet: New request scheduling job
 * @queue: Crypto queue
 * @busy: Indicates whether the device is currently handling some request
 *	and thus uses some of the fields of this state, like
 *	req, ctx, sg_src/dst (and copies). This essentially
 *	protects against concurrent access to these fields.
 * @lock: Lock for protecting both access to device hardware registers
 *	and fields related to current request (including the busy field).
 * @res: Resources for hash.
 * @io_hash_base: Per-variant offset for HASH block IO memory.
 * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags
 *	variables.
 * @hash_flags: Flags for current HASH op.
 * @hash_queue: Async hash queue.
 * @hash_tasklet: New HASH request scheduling job.
 * @xmit_buf: Buffer for current HASH request transfer into SSS block.
 * @hash_req: Current request sending to SSS HASH block.
 * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
 * @hash_sg_cnt: Counter for hash_sg_iter.
 * @use_hash: true if HASH algs enabled
 */
struct s5p_aes_dev {
	struct device			*dev;
	struct clk			*clk;
	void __iomem			*ioaddr;
	void __iomem			*aes_ioaddr;
	int				irq_fc;

	struct ablkcipher_request	*req;
	struct s5p_aes_ctx		*ctx;
	struct scatterlist		*sg_src;
	struct scatterlist		*sg_dst;

	struct scatterlist		*sg_src_cpy;
	struct scatterlist		*sg_dst_cpy;

	struct tasklet_struct		tasklet;
	struct crypto_queue		queue;
	bool				busy;
	spinlock_t			lock;

	struct resource			*res;
	void __iomem			*io_hash_base;

	spinlock_t			hash_lock; /* protect hash_ vars */
	unsigned long			hash_flags;
	struct crypto_queue		hash_queue;
	struct tasklet_struct		hash_tasklet;

	u8				xmit_buf[BUFLEN];
	struct ahash_request		*hash_req;
	struct scatterlist		*hash_sg_iter;
	unsigned int			hash_sg_cnt;

	bool				use_hash;
};

/**
 * struct s5p_hash_reqctx - HASH request context
 * @dd: Associated device
 * @op_update: Current request operation (OP_UPDATE or OP_FINAL)
 * @digcnt: Number of bytes processed by HW (without buffer[] ones)
 * @digest: Digest message or IV for partial result
 * @nregs: Number of HW registers for digest or IV read/write
 * @engine: Bits for selecting type of HASH in SSS block
 * @sg: sg for DMA transfer
 * @sg_len: Length of sg for DMA transfer
 * @sgl[]: sg for joining buffer and req->src scatterlist
 * @skip: Skip offset in req->src for current op
 * @total: Total number of bytes for current request
 * @finup: Keep state for finup or final.
 * @error: Keep track of error.
 * @bufcnt: Number of bytes held in buffer[]
 * @buffer[]: For byte(s) from end of req->src in UPDATE op
 */
struct s5p_hash_reqctx {
	struct s5p_aes_dev	*dd;
	bool			op_update;

	u64			digcnt;
	u8			digest[SHA256_DIGEST_SIZE];

	unsigned int		nregs; /* digest_size / sizeof(reg) */
	u32			engine;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	struct scatterlist	sgl[2];
	unsigned int		skip;
	unsigned int		total;
	bool			finup;
	bool			error;

	u32			bufcnt;
	u8			buffer[0];
};

/**
 * struct s5p_hash_ctx - HASH transformation context
 * @dd: Associated device
 * @flags: Bits for algorithm HASH.
 * @fallback: Software transformation for zero message or size < BUFLEN.
 */
struct s5p_hash_ctx {
	struct s5p_aes_dev	*dd;
	unsigned long		flags;
	struct crypto_shash	*fallback;
};

static const struct samsung_aes_variant s5p_aes_data = {
	.aes_offset	= 0x4000,
	.hash_offset	= 0x6000,
};

static const struct samsung_aes_variant exynos_aes_data = {
	.aes_offset	= 0x200,
	.hash_offset	= 0x400,
};

static const struct of_device_id s5p_sss_dt_match[] = {
	{
		.compatible = "samsung,s5pv210-secss",
		.data = &s5p_aes_data,
	},
	{
		.compatible = "samsung,exynos4210-secss",
		.data = &exynos_aes_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);

static inline const struct samsung_aes_variant *
find_s5p_sss_version(const struct platform_device *pdev)
{
	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
		const struct of_device_id *match;

		match = of_match_node(s5p_sss_dt_match,
				      pdev->dev.of_node);
		return (const struct samsung_aes_variant *)match->data;
	}
	return (const struct samsung_aes_variant *)
			platform_get_device_id(pdev)->driver_data;
}
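
/*
 * e.g. a device-tree node with compatible = "samsung,exynos4210-secss"
 * resolves here to &exynos_aes_data (AES at base + 0x200, HASH at
 * base + 0x400).
 */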

static struct s5p_aes_dev *s5p_dev;

static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
			       const struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}

static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
				const struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}

static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
{
	int len;

	if (!*sg)
		return;

	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
	free_pages((unsigned long)sg_virt(*sg), get_order(len));

	kfree(*sg);
	*sg = NULL;
}

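/*
 * s5p_sg_copy_buf() below moves data between a linear buffer and a
 * scatterlist: out == 0 copies sg data into buf, out == 1 copies buf back
 * into sg (as s5p_sg_done() does for the device's output data).
 */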
static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
			    unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static void s5p_sg_done(struct s5p_aes_dev *dev)
{
	if (dev->sg_dst_cpy) {
		dev_dbg(dev->dev,
			"Copying %d bytes of output data back to original place\n",
			dev->req->nbytes);
		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
				dev->req->nbytes, 1);
	}
	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
}

/* Calls the completion. Cannot be called with dev->lock held. */
static void s5p_aes_complete(struct ablkcipher_request *req, int err)
{
	req->base.complete(&req->base, err);
}

static void s5p_unset_outdata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
}

static void s5p_unset_indata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}

static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
			   struct scatterlist **dst)
{
	void *pages;
	int len;

	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
	if (!*dst)
		return -ENOMEM;

	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
	if (!pages) {
		kfree(*dst);
		*dst = NULL;
		return -ENOMEM;
	}

	s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);

	sg_init_table(*dst, 1);
	sg_set_buf(*dst, pages, len);

	return 0;
}

static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	if (!sg->length)
		return -EINVAL;

	if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
		return -ENOMEM;

	dev->sg_dst = sg;

	return 0;
}

static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	if (!sg->length)
		return -EINVAL;

	if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
		return -ENOMEM;

	dev->sg_src = sg;

	return 0;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new transmitting (output) data is ready and its address+length
 *    have to be written to device (by calling s5p_set_dma_outdata()).
 */
static int s5p_aes_tx(struct s5p_aes_dev *dev)
{
	int ret = 0;

	s5p_unset_outdata(dev);

	if (!sg_is_last(dev->sg_dst)) {
		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
		if (!ret)
			ret = 1;
	}

	return ret;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new receiving (input) data is ready and its address+length
 *    have to be written to device (by calling s5p_set_dma_indata()).
 */
static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
{
	int ret = 0;

	s5p_unset_indata(dev);

	if (!sg_is_last(dev->sg_src)) {
		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
		if (!ret)
			ret = 1;
	}

	return ret;
}

static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_hash_base + offset);
}

static inline void s5p_hash_write(struct s5p_aes_dev *dd,
				  u32 offset, u32 value)
{
	__raw_writel(value, dd->io_hash_base + offset);
}

/**
 * s5p_set_dma_hashdata() - start DMA with sg
 * @dev: device
 * @sg: scatterlist ready to DMA transmit
 */
static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
				 const struct scatterlist *sg)
{
	dev->hash_sg_cnt--;
	SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
}

/**
 * s5p_hash_rx() - get next hash_sg_iter
 * @dev: device
 *
 * Return:
 * 2	if there is no more data and it is UPDATE op
 * 1	if new receiving (input) data is ready and can be written to device
 * 0	if there is no more data and it is FINAL op
 */
static int s5p_hash_rx(struct s5p_aes_dev *dev)
{
	if (dev->hash_sg_cnt > 0) {
		dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
		return 1;
	}

	set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
	if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
		return 0;

	return 2;
}

static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
	struct ablkcipher_request *req;
	int err_dma_tx = 0;
	int err_dma_rx = 0;
	int err_dma_hx = 0;
	bool tx_end = false;
	bool hx_end = false;
	unsigned long flags;
	u32 status, st_bits;
	int err;

	spin_lock_irqsave(&dev->lock, flags);

	/*
	 * Handle rx or tx interrupt. If there is still data (the scatterlist
	 * did not reach its end), then map the next scatterlist entry.
	 * In case of such mapping error, s5p_aes_complete() should be called.
	 *
	 * If there is no more data in the tx scatter list, call
	 * s5p_aes_complete() and schedule a new tasklet.
	 *
	 * Handle hx interrupt. If there is still data, map the next entry.
	 */
	status = SSS_READ(dev, FCINTSTAT);
	if (status & SSS_FCINTSTAT_BRDMAINT)
		err_dma_rx = s5p_aes_rx(dev);

	if (status & SSS_FCINTSTAT_BTDMAINT) {
		if (sg_is_last(dev->sg_dst))
			tx_end = true;
		err_dma_tx = s5p_aes_tx(dev);
	}

	if (status & SSS_FCINTSTAT_HRDMAINT)
		err_dma_hx = s5p_hash_rx(dev);

	st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
				SSS_FCINTSTAT_HRDMAINT);
	/* clear DMA bits */
	SSS_WRITE(dev, FCINTPEND, st_bits);

	/* clear HASH irq bits */
	if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
		/* cannot have both HPART and HDONE */
		if (status & SSS_FCINTSTAT_HPARTINT)
			st_bits = SSS_HASH_STATUS_PARTIAL_DONE;

		if (status & SSS_FCINTSTAT_HDONEINT)
			st_bits = SSS_HASH_STATUS_MSG_DONE;

		set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
		s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
		hx_end = true;
		/* when DONE or PART, do not handle HASH DMA */
		err_dma_hx = 0;
	}

	if (err_dma_rx < 0) {
		err = err_dma_rx;
		goto error;
	}
	if (err_dma_tx < 0) {
		err = err_dma_tx;
		goto error;
	}

	if (tx_end) {
		s5p_sg_done(dev);
		if (err_dma_hx == 1)
			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

		spin_unlock_irqrestore(&dev->lock, flags);

		s5p_aes_complete(dev->req, 0);
		/* Device is still busy */
		tasklet_schedule(&dev->tasklet);
	} else {
		/*
		 * Writing length of DMA block (either receiving or
		 * transmitting) will start the operation immediately, so this
		 * should be done at the end (even after clearing pending
		 * interrupts to not miss the interrupt).
		 */
		if (err_dma_tx == 1)
			s5p_set_dma_outdata(dev, dev->sg_dst);
		if (err_dma_rx == 1)
			s5p_set_dma_indata(dev, dev->sg_src);
		if (err_dma_hx == 1)
			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

		spin_unlock_irqrestore(&dev->lock, flags);
	}

	goto hash_irq_end;

error:
	s5p_sg_done(dev);
	dev->busy = false;
	req = dev->req;
	if (err_dma_hx == 1)
		s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(req, err);

hash_irq_end:
	/*
	 * Note about the "else if":
	 * when hash_sg_iter reaches end and it is UPDATE op,
	 * issue SSS_HASH_PAUSE and wait for HPART irq
	 */
	if (hx_end)
		tasklet_schedule(&dev->hash_tasklet);
	else if (err_dma_hx == 2)
		s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
			       SSS_HASH_PAUSE);

	return IRQ_HANDLED;
}

/**
 * s5p_hash_read_msg() - read message or IV from HW
 * @req: AHASH request
 */
static void s5p_hash_read_msg(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i;

	for (i = 0; i < ctx->nregs; i++)
		hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
}

/**
 * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
 * @dd: device
 * @ctx: request context
 */
static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
				  const struct s5p_hash_reqctx *ctx)
{
	const u32 *hash = (const u32 *)ctx->digest;
	unsigned int i;

	for (i = 0; i < ctx->nregs; i++)
		s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
}

/**
 * s5p_hash_write_iv() - write IV for next partial/finup op.
 * @req: AHASH request
 */
static void s5p_hash_write_iv(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	s5p_hash_write_ctx_iv(ctx->dd, ctx);
}

/**
 * s5p_hash_copy_result() - copy digest into req->result
 * @req: AHASH request
 */
static void s5p_hash_copy_result(struct ahash_request *req)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
}

/**
 * s5p_hash_dma_flush() - flush HASH DMA
 * @dev: secss device
 */
static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
{
	SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
}

/**
 * s5p_hash_dma_enable() - enable DMA mode for HASH
 * @dev: secss device
 *
 * enable DMA mode for HASH
 */
static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
{
	s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
}

/**
 * s5p_hash_irq_disable() - disable irq HASH signals
 * @dev: secss device
 * @flags: bitfield with irq's to be disabled
 */
static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
{
	SSS_WRITE(dev, FCINTENCLR, flags);
}

/**
 * s5p_hash_irq_enable() - enable irq signals
 * @dev: secss device
 * @flags: bitfield with irq's to be enabled
 */
static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
{
	SSS_WRITE(dev, FCINTENSET, flags);
}

/**
 * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
 * @dev: secss device
 * @hashflow: HASH stream flow with/without crypto AES/DES
 */
static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
{
	unsigned long flags;
	u32 flow;

	spin_lock_irqsave(&dev->lock, flags);

	flow = SSS_READ(dev, FCFIFOCTRL);
	flow &= ~SSS_HASHIN_MASK;
	flow |= hashflow;
	SSS_WRITE(dev, FCFIFOCTRL, flow);

	spin_unlock_irqrestore(&dev->lock, flags);
}

/**
 * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
 * @dev: secss device
 * @hashflow: HASH stream flow with/without AES/DES
 *
 * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW,
 * enable HASH irq's HRDMA, HDONE, HPART
 */
static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
{
	s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
			     SSS_FCINTENCLR_HDONEINTENCLR |
			     SSS_FCINTENCLR_HPARTINTENCLR);
	s5p_hash_dma_flush(dev);

	s5p_hash_dma_enable(dev);
	s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
	s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
			    SSS_FCINTENSET_HDONEINTENSET |
			    SSS_FCINTENSET_HPARTINTENSET);
}

/**
 * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
 * @dd: secss device
 * @length: length for request
 * @final: true if final op
 *
 * Prepare SSS HASH block for processing bytes in DMA mode. If it is called
 * after previous updates, fill up IV words. For final, calculate and set
 * lengths for HASH so SecSS can finalize hash. For partial, set the SSS
 * HASH length to 2^63 so it is never reached, and zero prelow and prehigh.
 *
 * This function does not start DMA transfer.
 */
static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
				bool final)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
	u32 prelow, prehigh, low, high;
	u32 configflags, swapflags;
	u64 tmplen;

	configflags = ctx->engine | SSS_HASH_INIT_BIT;

	if (likely(ctx->digcnt)) {
		s5p_hash_write_ctx_iv(dd, ctx);
		configflags |= SSS_HASH_USER_IV_EN;
	}

	if (final) {
		/* number of bytes for last part */
		low = length;
		high = 0;
		/* total number of bits prev hashed */
		tmplen = ctx->digcnt * 8;
		prelow = (u32)tmplen;
		prehigh = (u32)(tmplen >> 32);
	} else {
		prelow = 0;
		prehigh = 0;
		low = 0;
		high = BIT(31);
	}

	swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
		    SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;

	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);

	s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
	s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
}

/**
 * s5p_hash_xmit_dma() - start DMA hash processing
 * @dd: secss device
 * @length: length for request
 * @final: true if final op
 *
 * Update digcnt here, as it is needed for finup/final op.
 */
static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
			     bool final)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
	unsigned int cnt;

	cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
	if (!cnt) {
		dev_err(dd->dev, "dma_map_sg error\n");
		ctx->error = true;
		return -EINVAL;
	}

	set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
	dd->hash_sg_iter = ctx->sg;
	dd->hash_sg_cnt = cnt;
	s5p_hash_write_ctrl(dd, length, final);
	ctx->digcnt += length;
	ctx->total -= length;

	/* catch last interrupt */
	if (final)
		set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);

	s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */

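	/*
	 * -EINPROGRESS is the async crypto API convention: the request was
	 * accepted and completion will be signalled later, here from the
	 * irq handler and the hash tasklet.
	 */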
	return -EINPROGRESS;
}

/**
 * s5p_hash_copy_sgs() - copy request's bytes into new buffer
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 *
 * Allocate a new buffer and copy the data for HASH into it. If xmit_buf
 * was filled, copy it first, then copy data from sg into it. Prepare one
 * sgl[0] entry with the allocated buffer.
 *
 * Set bit in dd->hash_flags so we can free it after irq ends processing.
 */
static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
			     struct scatterlist *sg, unsigned int new_len)
{
	unsigned int pages, len;
	void *buf;

	len = new_len + ctx->bufcnt;
	pages = get_order(len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
		ctx->error = true;
		return -ENOMEM;
	}

	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
				 new_len, 0);
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, len);
	ctx->sg = ctx->sgl;
	ctx->sg_len = 1;
	ctx->bufcnt = 0;
	ctx->skip = 0;
	set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);

	return 0;
}

/**
 * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 *
 * Allocate a new scatterlist table and copy the data for HASH into it. If
 * xmit_buf was filled, prepare it first, then copy page, length and offset
 * from the source sg into it, adjusting begin and/or end for skip offset
 * and hash_later value.
 *
 * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
 * it after irq ends processing.
 */
static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
				  struct scatterlist *sg, unsigned int new_len)
{
	unsigned int skip = ctx->skip, n = sg_nents(sg);
	struct scatterlist *tmp;
	unsigned int len;

	if (ctx->bufcnt)
		n++;

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg) {
		ctx->error = true;
		return -ENOMEM;
	}

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;

	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
	}

	while (sg && skip >= sg->length) {
		skip -= sg->length;
		sg = sg_next(sg);
	}

	while (sg && new_len) {
		len = sg->length - skip;
		if (new_len < len)
			len = new_len;

		new_len -= len;
		sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
		skip = 0;
		if (new_len <= 0)
			sg_mark_end(tmp);

		tmp = sg_next(tmp);
		ctx->sg_len++;
		sg = sg_next(sg);
	}

	set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);

	return 0;
}

| 1101 | /** |
| 1102 | * s5p_hash_prepare_sgs() - prepare sg for processing |
| 1103 | * @ctx: request context |
| 1104 | * @sg: source scatterlist request |
| 1105 | * @nbytes: number of bytes to process from sg |
| 1106 | * @final: final flag |
| 1107 | * |
| 1108 | * Check two conditions: (1) if buffers in sg have len aligned data, and (2) |
| 1109 | * sg table have good aligned elements (list_ok). If one of this checks fails, |
| 1110 | * then either (1) allocates new buffer for data with s5p_hash_copy_sgs, copy |
| 1111 | * data into this buffer and prepare request in sgl, or (2) allocates new sg |
| 1112 | * table and prepare sg elements. |
| 1113 | * |
| 1114 | * For digest or finup all conditions can be good, and we may not need any |
| 1115 | * fixes. |
| 1116 | */ |
| 1117 | static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx, |
| 1118 | struct scatterlist *sg, |
| 1119 | unsigned int new_len, bool final) |
| 1120 | { |
| 1121 | unsigned int skip = ctx->skip, nbytes = new_len, n = 0; |
| 1122 | bool aligned = true, list_ok = true; |
| 1123 | struct scatterlist *sg_tmp = sg; |
| 1124 | |
| 1125 | if (!sg || !sg->length || !new_len) |
| 1126 | return 0; |
| 1127 | |
| 1128 | if (skip || !final) |
| 1129 | list_ok = false; |
| 1130 | |
| 1131 | while (nbytes > 0 && sg_tmp) { |
| 1132 | n++; |
| 1133 | if (skip >= sg_tmp->length) { |
| 1134 | skip -= sg_tmp->length; |
| 1135 | if (!sg_tmp->length) { |
| 1136 | aligned = false; |
| 1137 | break; |
| 1138 | } |
| 1139 | } else { |
| 1140 | if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) { |
| 1141 | aligned = false; |
| 1142 | break; |
| 1143 | } |
| 1144 | |
| 1145 | if (nbytes < sg_tmp->length - skip) { |
| 1146 | list_ok = false; |
| 1147 | break; |
| 1148 | } |
| 1149 | |
| 1150 | nbytes -= sg_tmp->length - skip; |
| 1151 | skip = 0; |
| 1152 | } |
| 1153 | |
| 1154 | sg_tmp = sg_next(sg_tmp); |
| 1155 | } |
| 1156 | |
| 1157 | if (!aligned) |
| 1158 | return s5p_hash_copy_sgs(ctx, sg, new_len); |
| 1159 | else if (!list_ok) |
| 1160 | return s5p_hash_copy_sg_lists(ctx, sg, new_len); |
| 1161 | |
| 1162 | /* |
| 1163 | 	 * We have aligned data from the previous operation and/or the current one. |
| 1164 | 	 * Note: we enter here only if (digest or finup) and the data is aligned. |
| 1165 | */ |
| 1166 | if (ctx->bufcnt) { |
| 1167 | ctx->sg_len = n; |
| 1168 | sg_init_table(ctx->sgl, 2); |
| 1169 | sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt); |
| 1170 | sg_chain(ctx->sgl, 2, sg); |
| 1171 | ctx->sg = ctx->sgl; |
| 1172 | ctx->sg_len++; |
| 1173 | } else { |
| 1174 | ctx->sg = sg; |
| 1175 | ctx->sg_len = n; |
| 1176 | } |
| 1177 | |
| 1178 | return 0; |
| 1179 | } |
| 1180 | |
| 1181 | /** |
| 1182 | * s5p_hash_prepare_request() - prepare request for processing |
| 1183 | * @req: AHASH request |
| 1184 | * @update: true if UPDATE op |
| 1185 | * |
| 1186 |  * Note 1: we can have the update flag _and_ the final flag at the same time. |
| 1187 |  * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE), or when |
| 1188 |  * either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN, or when we |
| 1189 |  * have a final op. |
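| |  * |
| |  * Example (illustrative): with BUFLEN 64, bufcnt 10 and nbytes 100 on a |
| |  * non-final op, total is 110; the buffer is first topped up to a full |
| |  * block, then xmit_len becomes 64 and hash_later 46. |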
| 1190 | */ |
| 1191 | static int s5p_hash_prepare_request(struct ahash_request *req, bool update) |
| 1192 | { |
| 1193 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); |
| 1194 | bool final = ctx->finup; |
| 1195 | int xmit_len, hash_later, nbytes; |
| 1196 | int ret; |
| 1197 | |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 1198 | if (update) |
| 1199 | nbytes = req->nbytes; |
| 1200 | else |
| 1201 | nbytes = 0; |
| 1202 | |
| 1203 | ctx->total = nbytes + ctx->bufcnt; |
| 1204 | if (!ctx->total) |
| 1205 | return 0; |
| 1206 | |
| 1207 | if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) { |
| 1208 | /* bytes left from previous request, so fill up to BUFLEN */ |
| 1209 | int len = BUFLEN - ctx->bufcnt % BUFLEN; |
| 1210 | |
| 1211 | if (len > nbytes) |
| 1212 | len = nbytes; |
| 1213 | |
| 1214 | scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, |
| 1215 | 0, len, 0); |
| 1216 | ctx->bufcnt += len; |
| 1217 | nbytes -= len; |
| 1218 | ctx->skip = len; |
| 1219 | } else { |
| 1220 | ctx->skip = 0; |
| 1221 | } |
| 1222 | |
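| | 	/* stage previously buffered bytes in xmit_buf for the DMA engine */ |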
| 1223 | if (ctx->bufcnt) |
| 1224 | memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt); |
| 1225 | |
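| | 	/* final sends everything; otherwise keep the unaligned tail (or one full block) buffered */ |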
| 1226 | xmit_len = ctx->total; |
| 1227 | if (final) { |
| 1228 | hash_later = 0; |
| 1229 | } else { |
| 1230 | if (IS_ALIGNED(xmit_len, BUFLEN)) |
| 1231 | xmit_len -= BUFLEN; |
| 1232 | else |
| 1233 | xmit_len -= xmit_len & (BUFLEN - 1); |
| 1234 | |
| 1235 | hash_later = ctx->total - xmit_len; |
| 1236 | /* copy hash_later bytes from end of req->src */ |
| 1237 | /* previous bytes are in xmit_buf, so no overwrite */ |
| 1238 | scatterwalk_map_and_copy(ctx->buffer, req->src, |
| 1239 | req->nbytes - hash_later, |
| 1240 | hash_later, 0); |
| 1241 | } |
| 1242 | |
| 1243 | if (xmit_len > BUFLEN) { |
| 1244 | ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later, |
| 1245 | final); |
| 1246 | if (ret) |
| 1247 | return ret; |
| 1248 | } else { |
| 1249 | /* have buffered data only */ |
| 1250 | if (unlikely(!ctx->bufcnt)) { |
| 1251 | /* first update didn't fill up buffer */ |
| 1252 | scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src, |
| 1253 | 0, xmit_len, 0); |
| 1254 | } |
| 1255 | |
| 1256 | sg_init_table(ctx->sgl, 1); |
| 1257 | sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len); |
| 1258 | |
| 1259 | ctx->sg = ctx->sgl; |
| 1260 | ctx->sg_len = 1; |
| 1261 | } |
| 1262 | |
| 1263 | ctx->bufcnt = hash_later; |
| 1264 | if (!final) |
| 1265 | ctx->total = xmit_len; |
| 1266 | |
| 1267 | return 0; |
| 1268 | } |
| 1269 | |
| 1270 | /** |
| 1271 | * s5p_hash_update_dma_stop() - unmap DMA |
| 1272 | * @dd: secss device |
| 1273 | * |
| 1274 | * Unmap scatterlist ctx->sg. |
| 1275 | */ |
| 1276 | static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd) |
| 1277 | { |
Krzysztof Kozlowski | 6584eac | 2018-03-01 21:50:13 +0100 | [diff] [blame] | 1278 | const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req); |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 1279 | |
| 1280 | dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE); |
| 1281 | clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags); |
| 1282 | } |
| 1283 | |
| 1284 | /** |
| 1285 | * s5p_hash_finish() - copy calculated digest to crypto layer |
| 1286 | * @req: AHASH request |
| 1287 | */ |
| 1288 | static void s5p_hash_finish(struct ahash_request *req) |
| 1289 | { |
| 1290 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); |
| 1291 | struct s5p_aes_dev *dd = ctx->dd; |
| 1292 | |
| 1293 | if (ctx->digcnt) |
| 1294 | s5p_hash_copy_result(req); |
| 1295 | |
| 1296 | dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt); |
| 1297 | } |
| 1298 | |
| 1299 | /** |
| 1300 | * s5p_hash_finish_req() - finish request |
| 1301 | * @req: AHASH request |
| 1302 | * @err: error |
| 1303 | */ |
| 1304 | static void s5p_hash_finish_req(struct ahash_request *req, int err) |
| 1305 | { |
| 1306 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); |
| 1307 | struct s5p_aes_dev *dd = ctx->dd; |
| 1308 | unsigned long flags; |
| 1309 | |
| 1310 | if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags)) |
| 1311 | free_pages((unsigned long)sg_virt(ctx->sg), |
| 1312 | get_order(ctx->sg->length)); |
| 1313 | |
| 1314 | if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags)) |
| 1315 | kfree(ctx->sg); |
| 1316 | |
| 1317 | ctx->sg = NULL; |
| 1318 | dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) | |
| 1319 | BIT(HASH_FLAGS_SGS_COPIED)); |
| 1320 | |
| 1321 | if (!err && !ctx->error) { |
| 1322 | s5p_hash_read_msg(req); |
| 1323 | if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags)) |
| 1324 | s5p_hash_finish(req); |
| 1325 | } else { |
| 1326 | ctx->error = true; |
| 1327 | } |
| 1328 | |
| 1329 | spin_lock_irqsave(&dd->hash_lock, flags); |
| 1330 | dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) | |
| 1331 | BIT(HASH_FLAGS_DMA_READY) | |
| 1332 | BIT(HASH_FLAGS_OUTPUT_READY)); |
| 1333 | spin_unlock_irqrestore(&dd->hash_lock, flags); |
| 1334 | |
| 1335 | if (req->base.complete) |
| 1336 | req->base.complete(&req->base, err); |
| 1337 | } |
| 1338 | |
| 1339 | /** |
| 1340 | * s5p_hash_handle_queue() - handle hash queue |
| 1341 | * @dd: device s5p_aes_dev |
| 1342 | * @req: AHASH request |
| 1343 | * |
| 1344 |  * If req != NULL, enqueue it on dd->queue. If FLAGS_BUSY is not set on the |
| 1345 |  * device, then process the first request from dd->queue. |
| 1346 | * |
| 1347 | * Returns: see s5p_hash_final below. |
| 1348 | */ |
| 1349 | static int s5p_hash_handle_queue(struct s5p_aes_dev *dd, |
| 1350 | struct ahash_request *req) |
| 1351 | { |
| 1352 | struct crypto_async_request *async_req, *backlog; |
| 1353 | struct s5p_hash_reqctx *ctx; |
| 1354 | unsigned long flags; |
| 1355 | int err = 0, ret = 0; |
| 1356 | |
| 1357 | retry: |
| 1358 | spin_lock_irqsave(&dd->hash_lock, flags); |
| 1359 | if (req) |
| 1360 | ret = ahash_enqueue_request(&dd->hash_queue, req); |
| 1361 | |
| 1362 | if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) { |
| 1363 | spin_unlock_irqrestore(&dd->hash_lock, flags); |
| 1364 | return ret; |
| 1365 | } |
| 1366 | |
| 1367 | backlog = crypto_get_backlog(&dd->hash_queue); |
| 1368 | async_req = crypto_dequeue_request(&dd->hash_queue); |
| 1369 | if (async_req) |
| 1370 | set_bit(HASH_FLAGS_BUSY, &dd->hash_flags); |
| 1371 | |
| 1372 | spin_unlock_irqrestore(&dd->hash_lock, flags); |
| 1373 | |
| 1374 | if (!async_req) |
| 1375 | return ret; |
| 1376 | |
| 1377 | if (backlog) |
| 1378 | backlog->complete(backlog, -EINPROGRESS); |
| 1379 | |
| 1380 | req = ahash_request_cast(async_req); |
| 1381 | dd->hash_req = req; |
| 1382 | ctx = ahash_request_ctx(req); |
| 1383 | |
| 1384 | err = s5p_hash_prepare_request(req, ctx->op_update); |
| 1385 | if (err || !ctx->total) |
| 1386 | goto out; |
| 1387 | |
| 1388 | dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n", |
| 1389 | ctx->op_update, req->nbytes); |
| 1390 | |
| 1391 | s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT); |
| 1392 | if (ctx->digcnt) |
| 1393 | s5p_hash_write_iv(req); /* restore hash IV */ |
| 1394 | |
| 1395 | if (ctx->op_update) { /* HASH_OP_UPDATE */ |
| 1396 | err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup); |
| 1397 | if (err != -EINPROGRESS && ctx->finup && !ctx->error) |
| 1398 | /* no final() after finup() */ |
| 1399 | err = s5p_hash_xmit_dma(dd, ctx->total, true); |
| 1400 | } else { /* HASH_OP_FINAL */ |
| 1401 | err = s5p_hash_xmit_dma(dd, ctx->total, true); |
| 1402 | } |
| 1403 | out: |
| 1404 | if (err != -EINPROGRESS) { |
| 1405 | /* hash_tasklet_cb will not finish it, so do it here */ |
| 1406 | s5p_hash_finish_req(req, err); |
| 1407 | req = NULL; |
| 1408 | |
| 1409 | /* |
| 1410 | * Execute next request immediately if there is anything |
| 1411 | * in queue. |
| 1412 | */ |
| 1413 | goto retry; |
| 1414 | } |
| 1415 | |
| 1416 | return ret; |
| 1417 | } |
| 1418 | |
| 1419 | /** |
| 1420 | * s5p_hash_tasklet_cb() - hash tasklet |
| 1421 | * @data: ptr to s5p_aes_dev |
| 1422 | */ |
| 1423 | static void s5p_hash_tasklet_cb(unsigned long data) |
| 1424 | { |
| 1425 | struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data; |
| 1426 | |
| 1427 | if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) { |
| 1428 | s5p_hash_handle_queue(dd, NULL); |
| 1429 | return; |
| 1430 | } |
| 1431 | |
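| | 	/* DMA signalled: unmap the scatterlist and finish once the output is ready */ |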
| 1432 | if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) { |
| 1433 | if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE, |
| 1434 | &dd->hash_flags)) { |
| 1435 | s5p_hash_update_dma_stop(dd); |
| 1436 | } |
| 1437 | |
| 1438 | if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY, |
| 1439 | &dd->hash_flags)) { |
| 1440 | /* hash or semi-hash ready */ |
| 1441 | clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags); |
Dan Carpenter | f7daa71 | 2017-11-10 00:26:04 +0300 | [diff] [blame] | 1442 | goto finish; |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 1443 | } |
| 1444 | } |
| 1445 | |
| 1446 | return; |
| 1447 | |
| 1448 | finish: |
| 1449 | 	/* finish current request */ |
| 1450 | s5p_hash_finish_req(dd->hash_req, 0); |
| 1451 | |
| 1452 | /* If we are not busy, process next req */ |
| 1453 | if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) |
| 1454 | s5p_hash_handle_queue(dd, NULL); |
| 1455 | } |
| 1456 | |
| 1457 | /** |
| 1458 | * s5p_hash_enqueue() - enqueue request |
| 1459 | * @req: AHASH request |
| 1460 | * @op: operation UPDATE (true) or FINAL (false) |
| 1461 | * |
| 1462 | * Returns: see s5p_hash_final below. |
| 1463 | */ |
| 1464 | static int s5p_hash_enqueue(struct ahash_request *req, bool op) |
| 1465 | { |
| 1466 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); |
| 1467 | struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); |
| 1468 | |
| 1469 | ctx->op_update = op; |
| 1470 | |
| 1471 | return s5p_hash_handle_queue(tctx->dd, req); |
| 1472 | } |
| 1473 | |
| 1474 | /** |
| 1475 | * s5p_hash_update() - process the hash input data |
| 1476 | * @req: AHASH request |
| 1477 | * |
| 1478 |  * If the request fits in the buffer, copy it and return immediately; |
| 1479 |  * otherwise enqueue it with OP_UPDATE. |
| 1480 | * |
| 1481 | * Returns: see s5p_hash_final below. |
| 1482 | */ |
| 1483 | static int s5p_hash_update(struct ahash_request *req) |
| 1484 | { |
| 1485 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); |
| 1486 | |
| 1487 | if (!req->nbytes) |
| 1488 | return 0; |
| 1489 | |
| 1490 | if (ctx->bufcnt + req->nbytes <= BUFLEN) { |
| 1491 | scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, |
| 1492 | 0, req->nbytes, 0); |
| 1493 | ctx->bufcnt += req->nbytes; |
| 1494 | return 0; |
| 1495 | } |
| 1496 | |
| 1497 | return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */ |
| 1498 | } |
| 1499 | |
| 1500 | /** |
| 1501 | * s5p_hash_shash_digest() - calculate shash digest |
| 1502 | * @tfm: crypto transformation |
| 1503 | * @flags: tfm flags |
| 1504 | * @data: input data |
| 1505 | * @len: length of data |
| 1506 | * @out: output buffer |
| 1507 | */ |
| 1508 | static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags, |
| 1509 | const u8 *data, unsigned int len, u8 *out) |
| 1510 | { |
| 1511 | SHASH_DESC_ON_STACK(shash, tfm); |
| 1512 | |
| 1513 | shash->tfm = tfm; |
| 1514 | shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP; |
| 1515 | |
| 1516 | return crypto_shash_digest(shash, data, len, out); |
| 1517 | } |
| 1518 | |
| 1519 | /** |
| 1520 | * s5p_hash_final_shash() - calculate shash digest |
| 1521 | * @req: AHASH request |
| 1522 | */ |
| 1523 | static int s5p_hash_final_shash(struct ahash_request *req) |
| 1524 | { |
| 1525 | struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); |
| 1526 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); |
| 1527 | |
| 1528 | return s5p_hash_shash_digest(tctx->fallback, req->base.flags, |
| 1529 | ctx->buffer, ctx->bufcnt, req->result); |
| 1530 | } |
| 1531 | |
| 1532 | /** |
| 1533 | * s5p_hash_final() - close up hash and calculate digest |
| 1534 | * @req: AHASH request |
| 1535 | * |
| 1536 |  * Note: in final, req->src does not hold any data, while req->nbytes can |
| 1537 |  * still be non-zero. |
| 1538 | * |
| 1539 | * If there were no input data processed yet and the buffered hash data is |
| 1540 | * less than BUFLEN (64) then calculate the final hash immediately by using |
| 1541 | * SW algorithm fallback. |
| 1542 | * |
| 1543 |  * Otherwise enqueue the current AHASH request with the OP_FINAL operation |
| 1544 |  * and finalize the hash message in HW. Note that if digcnt != 0 then there |
| 1545 |  * was a previous update op, so there are always some buffered bytes in |
| 1546 |  * ctx->buffer, which means that ctx->bufcnt != 0. |
| 1547 | * |
| 1548 | * Returns: |
| 1549 | * 0 if the request has been processed immediately, |
| 1550 | * -EINPROGRESS if the operation has been queued for later execution or is set |
| 1551 | * to processing by HW, |
| 1552 | * -EBUSY if queue is full and request should be resubmitted later, |
| 1553 |  * other negative values denote an error. |
| 1554 | */ |
| 1555 | static int s5p_hash_final(struct ahash_request *req) |
| 1556 | { |
| 1557 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); |
| 1558 | |
| 1559 | ctx->finup = true; |
| 1560 | if (ctx->error) |
| 1561 | return -EINVAL; /* uncompleted hash is not needed */ |
| 1562 | |
| 1563 | if (!ctx->digcnt && ctx->bufcnt < BUFLEN) |
| 1564 | return s5p_hash_final_shash(req); |
| 1565 | |
| 1566 | return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */ |
| 1567 | } |
| 1568 | |
| 1569 | /** |
| 1570 | * s5p_hash_finup() - process last req->src and calculate digest |
| 1571 | * @req: AHASH request containing the last update data |
| 1572 | * |
| 1573 | * Return values: see s5p_hash_final above. |
| 1574 | */ |
| 1575 | static int s5p_hash_finup(struct ahash_request *req) |
| 1576 | { |
| 1577 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); |
| 1578 | int err1, err2; |
| 1579 | |
| 1580 | ctx->finup = true; |
| 1581 | |
| 1582 | err1 = s5p_hash_update(req); |
| 1583 | if (err1 == -EINPROGRESS || err1 == -EBUSY) |
| 1584 | return err1; |
| 1585 | |
| 1586 | /* |
| 1587 | 	 * final() always has to be called to clean up resources, even if |
| 1588 | 	 * update() failed (unless it returned -EINPROGRESS), or to calculate |
| 1589 | 	 * the digest for a small amount of buffered data |
| 1590 | */ |
| 1591 | err2 = s5p_hash_final(req); |
| 1592 | |
| 1593 | return err1 ?: err2; |
| 1594 | } |
| 1595 | |
| 1596 | /** |
| 1597 |  * s5p_hash_init() - initialize AHASH request context |
| 1598 | * @req: AHASH request |
| 1599 | * |
| 1600 | * Init async hash request context. |
| 1601 | */ |
| 1602 | static int s5p_hash_init(struct ahash_request *req) |
| 1603 | { |
| 1604 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); |
| 1605 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
| 1606 | struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm); |
| 1607 | |
| 1608 | ctx->dd = tctx->dd; |
| 1609 | ctx->error = false; |
| 1610 | ctx->finup = false; |
| 1611 | ctx->bufcnt = 0; |
| 1612 | ctx->digcnt = 0; |
| 1613 | ctx->total = 0; |
| 1614 | ctx->skip = 0; |
| 1615 | |
| 1616 | dev_dbg(tctx->dd->dev, "init: digest size: %d\n", |
| 1617 | crypto_ahash_digestsize(tfm)); |
| 1618 | |
| 1619 | switch (crypto_ahash_digestsize(tfm)) { |
| 1620 | case MD5_DIGEST_SIZE: |
| 1621 | ctx->engine = SSS_HASH_ENGINE_MD5; |
| 1622 | ctx->nregs = HASH_MD5_MAX_REG; |
| 1623 | break; |
| 1624 | case SHA1_DIGEST_SIZE: |
| 1625 | ctx->engine = SSS_HASH_ENGINE_SHA1; |
| 1626 | ctx->nregs = HASH_SHA1_MAX_REG; |
| 1627 | break; |
| 1628 | case SHA256_DIGEST_SIZE: |
| 1629 | ctx->engine = SSS_HASH_ENGINE_SHA256; |
| 1630 | ctx->nregs = HASH_SHA256_MAX_REG; |
| 1631 | break; |
| 1632 | default: |
| 1633 | ctx->error = true; |
| 1634 | return -EINVAL; |
| 1635 | } |
| 1636 | |
| 1637 | return 0; |
| 1638 | } |
| 1639 | |
| 1640 | /** |
| 1641 |  * s5p_hash_digest() - calculate digest from req->src |
| 1642 | * @req: AHASH request |
| 1643 | * |
| 1644 | * Return values: see s5p_hash_final above. |
| 1645 | */ |
| 1646 | static int s5p_hash_digest(struct ahash_request *req) |
| 1647 | { |
| 1648 | return s5p_hash_init(req) ?: s5p_hash_finup(req); |
| 1649 | } |
| 1650 | |
| 1651 | /** |
| 1652 |  * s5p_hash_cra_init_alg() - init crypto alg transformation |
| 1653 | * @tfm: crypto transformation |
| 1654 | */ |
| 1655 | static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm) |
| 1656 | { |
| 1657 | struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm); |
| 1658 | const char *alg_name = crypto_tfm_alg_name(tfm); |
| 1659 | |
| 1660 | tctx->dd = s5p_dev; |
| 1661 | /* Allocate a fallback and abort if it failed. */ |
| 1662 | tctx->fallback = crypto_alloc_shash(alg_name, 0, |
| 1663 | CRYPTO_ALG_NEED_FALLBACK); |
| 1664 | if (IS_ERR(tctx->fallback)) { |
| 1665 | pr_err("fallback alloc fails for '%s'\n", alg_name); |
| 1666 | return PTR_ERR(tctx->fallback); |
| 1667 | } |
| 1668 | |
| 1669 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
| 1670 | sizeof(struct s5p_hash_reqctx) + BUFLEN); |
| 1671 | |
| 1672 | return 0; |
| 1673 | } |
| 1674 | |
| 1675 | /** |
| 1676 |  * s5p_hash_cra_init() - init crypto tfm |
| 1677 | * @tfm: crypto transformation |
| 1678 | */ |
| 1679 | static int s5p_hash_cra_init(struct crypto_tfm *tfm) |
| 1680 | { |
| 1681 | return s5p_hash_cra_init_alg(tfm); |
| 1682 | } |
| 1683 | |
| 1684 | /** |
| 1685 |  * s5p_hash_cra_exit() - exit crypto tfm |
| 1686 | * @tfm: crypto transformation |
| 1687 | * |
| 1688 |  * Free the allocated fallback. |
| 1689 | */ |
| 1690 | static void s5p_hash_cra_exit(struct crypto_tfm *tfm) |
| 1691 | { |
| 1692 | struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm); |
| 1693 | |
| 1694 | crypto_free_shash(tctx->fallback); |
| 1695 | tctx->fallback = NULL; |
| 1696 | } |
| 1697 | |
| 1698 | /** |
| 1699 |  * s5p_hash_export() - export hash state |
| 1700 | * @req: AHASH request |
| 1701 | * @out: buffer for exported state |
| 1702 | */ |
| 1703 | static int s5p_hash_export(struct ahash_request *req, void *out) |
| 1704 | { |
Krzysztof Kozlowski | 6584eac | 2018-03-01 21:50:13 +0100 | [diff] [blame] | 1705 | const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 1706 | |
| 1707 | memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt); |
| 1708 | |
| 1709 | return 0; |
| 1710 | } |
| 1711 | |
| 1712 | /** |
| 1713 |  * s5p_hash_import() - import hash state |
| 1714 | * @req: AHASH request |
| 1715 | * @in: buffer with state to be imported from |
| 1716 | */ |
| 1717 | static int s5p_hash_import(struct ahash_request *req, const void *in) |
| 1718 | { |
| 1719 | struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); |
| 1720 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
| 1721 | struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm); |
| 1722 | const struct s5p_hash_reqctx *ctx_in = in; |
| 1723 | |
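| | 	/* copy the fixed-size state blob first, then sanity-check its bufcnt */ |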
| 1724 | memcpy(ctx, in, sizeof(*ctx) + BUFLEN); |
| 1725 | if (ctx_in->bufcnt > BUFLEN) { |
| 1726 | ctx->error = true; |
| 1727 | return -EINVAL; |
| 1728 | } |
| 1729 | |
| 1730 | ctx->dd = tctx->dd; |
| 1731 | ctx->error = false; |
| 1732 | |
| 1733 | return 0; |
| 1734 | } |
| 1735 | |
| 1736 | static struct ahash_alg algs_sha1_md5_sha256[] = { |
| 1737 | { |
| 1738 | .init = s5p_hash_init, |
| 1739 | .update = s5p_hash_update, |
| 1740 | .final = s5p_hash_final, |
| 1741 | .finup = s5p_hash_finup, |
| 1742 | .digest = s5p_hash_digest, |
| 1743 | .export = s5p_hash_export, |
| 1744 | .import = s5p_hash_import, |
| 1745 | .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN, |
| 1746 | .halg.digestsize = SHA1_DIGEST_SIZE, |
| 1747 | .halg.base = { |
| 1748 | .cra_name = "sha1", |
| 1749 | .cra_driver_name = "exynos-sha1", |
| 1750 | .cra_priority = 100, |
Eric Biggers | 6a38f62 | 2018-06-30 15:16:12 -0700 | [diff] [blame] | 1751 | .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 1752 | CRYPTO_ALG_ASYNC | |
| 1753 | CRYPTO_ALG_NEED_FALLBACK, |
| 1754 | .cra_blocksize = HASH_BLOCK_SIZE, |
| 1755 | .cra_ctxsize = sizeof(struct s5p_hash_ctx), |
| 1756 | .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK, |
| 1757 | .cra_module = THIS_MODULE, |
| 1758 | .cra_init = s5p_hash_cra_init, |
| 1759 | .cra_exit = s5p_hash_cra_exit, |
| 1760 | } |
| 1761 | }, |
| 1762 | { |
| 1763 | .init = s5p_hash_init, |
| 1764 | .update = s5p_hash_update, |
| 1765 | .final = s5p_hash_final, |
| 1766 | .finup = s5p_hash_finup, |
| 1767 | .digest = s5p_hash_digest, |
| 1768 | .export = s5p_hash_export, |
| 1769 | .import = s5p_hash_import, |
| 1770 | .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN, |
| 1771 | .halg.digestsize = MD5_DIGEST_SIZE, |
| 1772 | .halg.base = { |
| 1773 | .cra_name = "md5", |
| 1774 | .cra_driver_name = "exynos-md5", |
| 1775 | .cra_priority = 100, |
Eric Biggers | 6a38f62 | 2018-06-30 15:16:12 -0700 | [diff] [blame] | 1776 | .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 1777 | CRYPTO_ALG_ASYNC | |
| 1778 | CRYPTO_ALG_NEED_FALLBACK, |
| 1779 | .cra_blocksize = HASH_BLOCK_SIZE, |
| 1780 | .cra_ctxsize = sizeof(struct s5p_hash_ctx), |
| 1781 | .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK, |
| 1782 | .cra_module = THIS_MODULE, |
| 1783 | .cra_init = s5p_hash_cra_init, |
| 1784 | .cra_exit = s5p_hash_cra_exit, |
| 1785 | } |
| 1786 | }, |
| 1787 | { |
| 1788 | .init = s5p_hash_init, |
| 1789 | .update = s5p_hash_update, |
| 1790 | .final = s5p_hash_final, |
| 1791 | .finup = s5p_hash_finup, |
| 1792 | .digest = s5p_hash_digest, |
| 1793 | .export = s5p_hash_export, |
| 1794 | .import = s5p_hash_import, |
| 1795 | .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN, |
| 1796 | .halg.digestsize = SHA256_DIGEST_SIZE, |
| 1797 | .halg.base = { |
| 1798 | .cra_name = "sha256", |
| 1799 | .cra_driver_name = "exynos-sha256", |
| 1800 | .cra_priority = 100, |
Eric Biggers | 6a38f62 | 2018-06-30 15:16:12 -0700 | [diff] [blame] | 1801 | .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 1802 | CRYPTO_ALG_ASYNC | |
| 1803 | CRYPTO_ALG_NEED_FALLBACK, |
| 1804 | .cra_blocksize = HASH_BLOCK_SIZE, |
| 1805 | .cra_ctxsize = sizeof(struct s5p_hash_ctx), |
| 1806 | .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK, |
| 1807 | .cra_module = THIS_MODULE, |
| 1808 | .cra_init = s5p_hash_cra_init, |
| 1809 | .cra_exit = s5p_hash_cra_exit, |
| 1810 | } |
| 1811 | } |
| 1812 | |
| 1813 | }; |
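| | /* |
| |  * Illustrative usage sketch (assumed caller code, not part of this driver): |
| |  * once registered, these transforms are driven through the generic ahash |
| |  * API, e.g. for "sha256": |
| |  * |
| |  *	tfm = crypto_alloc_ahash("sha256", 0, 0); |
| |  *	req = ahash_request_alloc(tfm, GFP_KERNEL); |
| |  *	ahash_request_set_crypt(req, sg, digest, nbytes); |
| |  *	err = crypto_ahash_digest(req); |
| |  * |
| |  * sg, digest and nbytes are hypothetical caller-owned variables; error |
| |  * handling and the completion callback are omitted for brevity. |
| |  */ |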
| 1814 | |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1815 | static void s5p_set_aes(struct s5p_aes_dev *dev, |
Christoph Manszewski | cdf640a | 2018-09-17 17:09:30 +0200 | [diff] [blame] | 1816 | const u8 *key, const u8 *iv, const u8 *ctr, |
Krzysztof Kozlowski | 6584eac | 2018-03-01 21:50:13 +0100 | [diff] [blame] | 1817 | unsigned int keylen) |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1818 | { |
| 1819 | void __iomem *keystart; |
| 1820 | |
Naveen Krishna Chatradhi | 8f9702a | 2014-05-08 21:58:15 +0800 | [diff] [blame] | 1821 | if (iv) |
Krzysztof Kozlowski | ef5c73b | 2019-02-19 13:01:03 +0100 | [diff] [blame^] | 1822 | memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, |
| 1823 | AES_BLOCK_SIZE); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1824 | |
Christoph Manszewski | cdf640a | 2018-09-17 17:09:30 +0200 | [diff] [blame] | 1825 | if (ctr) |
Krzysztof Kozlowski | ef5c73b | 2019-02-19 13:01:03 +0100 | [diff] [blame^] | 1826 | memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, |
| 1827 | AES_BLOCK_SIZE); |
Christoph Manszewski | cdf640a | 2018-09-17 17:09:30 +0200 | [diff] [blame] | 1828 | |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1829 | if (keylen == AES_KEYSIZE_256) |
Naveen Krishna Chatradhi | 8924510 | 2014-05-08 21:58:14 +0800 | [diff] [blame] | 1830 | keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1831 | else if (keylen == AES_KEYSIZE_192) |
Naveen Krishna Chatradhi | 8924510 | 2014-05-08 21:58:14 +0800 | [diff] [blame] | 1832 | keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1833 | else |
Naveen Krishna Chatradhi | 8924510 | 2014-05-08 21:58:14 +0800 | [diff] [blame] | 1834 | keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1835 | |
Krzysztof Kozłowski | 1e3012d | 2016-01-11 20:45:51 +0900 | [diff] [blame] | 1836 | 	memcpy_toio(keystart, key, keylen); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1837 | } |
| 1838 | |
Krzysztof Kozlowski | 9e4a110 | 2016-03-22 10:58:24 +0900 | [diff] [blame] | 1839 | static bool s5p_is_sg_aligned(struct scatterlist *sg) |
| 1840 | { |
| 1841 | while (sg) { |
Marek Szyprowski | d149797 | 2016-04-26 09:29:26 +0200 | [diff] [blame] | 1842 | if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE)) |
Krzysztof Kozlowski | 9e4a110 | 2016-03-22 10:58:24 +0900 | [diff] [blame] | 1843 | return false; |
| 1844 | sg = sg_next(sg); |
| 1845 | } |
| 1846 | |
| 1847 | return true; |
| 1848 | } |
| 1849 | |
| 1850 | static int s5p_set_indata_start(struct s5p_aes_dev *dev, |
| 1851 | struct ablkcipher_request *req) |
| 1852 | { |
| 1853 | struct scatterlist *sg; |
| 1854 | int err; |
| 1855 | |
| 1856 | dev->sg_src_cpy = NULL; |
| 1857 | sg = req->src; |
| 1858 | if (!s5p_is_sg_aligned(sg)) { |
| 1859 | dev_dbg(dev->dev, |
| 1860 | "At least one unaligned source scatter list, making a copy\n"); |
| 1861 | err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy); |
| 1862 | if (err) |
| 1863 | return err; |
| 1864 | |
| 1865 | sg = dev->sg_src_cpy; |
| 1866 | } |
| 1867 | |
| 1868 | err = s5p_set_indata(dev, sg); |
| 1869 | if (err) { |
| 1870 | s5p_free_sg_cpy(dev, &dev->sg_src_cpy); |
| 1871 | return err; |
| 1872 | } |
| 1873 | |
| 1874 | return 0; |
| 1875 | } |
| 1876 | |
| 1877 | static int s5p_set_outdata_start(struct s5p_aes_dev *dev, |
Christoph Manszewski | 6c12b6b | 2018-09-17 17:09:28 +0200 | [diff] [blame] | 1878 | struct ablkcipher_request *req) |
Krzysztof Kozlowski | 9e4a110 | 2016-03-22 10:58:24 +0900 | [diff] [blame] | 1879 | { |
| 1880 | struct scatterlist *sg; |
| 1881 | int err; |
| 1882 | |
| 1883 | dev->sg_dst_cpy = NULL; |
| 1884 | sg = req->dst; |
| 1885 | if (!s5p_is_sg_aligned(sg)) { |
| 1886 | dev_dbg(dev->dev, |
| 1887 | "At least one unaligned dest scatter list, making a copy\n"); |
| 1888 | err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy); |
| 1889 | if (err) |
| 1890 | return err; |
| 1891 | |
| 1892 | sg = dev->sg_dst_cpy; |
| 1893 | } |
| 1894 | |
| 1895 | err = s5p_set_outdata(dev, sg); |
| 1896 | if (err) { |
| 1897 | s5p_free_sg_cpy(dev, &dev->sg_dst_cpy); |
| 1898 | return err; |
| 1899 | } |
| 1900 | |
| 1901 | return 0; |
| 1902 | } |
| 1903 | |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1904 | static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) |
| 1905 | { |
Krzysztof Kozlowski | 5318c53 | 2016-05-27 13:49:40 +0200 | [diff] [blame] | 1906 | struct ablkcipher_request *req = dev->req; |
Christoph Manszewski | b1b4416 | 2018-09-17 17:09:29 +0200 | [diff] [blame] | 1907 | u32 aes_control; |
Krzysztof Kozlowski | 5318c53 | 2016-05-27 13:49:40 +0200 | [diff] [blame] | 1908 | unsigned long flags; |
| 1909 | int err; |
Christoph Manszewski | cdf640a | 2018-09-17 17:09:30 +0200 | [diff] [blame] | 1910 | u8 *iv, *ctr; |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1911 | |
Christoph Manszewski | cdf640a | 2018-09-17 17:09:30 +0200 | [diff] [blame] | 1912 | 	/* This sets bits [13:12] to 00, which selects the 128-bit counter */ |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1913 | aes_control = SSS_AES_KEY_CHANGE_MODE; |
| 1914 | if (mode & FLAGS_AES_DECRYPT) |
| 1915 | aes_control |= SSS_AES_MODE_DECRYPT; |
| 1916 | |
Kamil Konieczny | c927b08 | 2018-02-07 16:52:09 +0100 | [diff] [blame] | 1917 | if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) { |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1918 | aes_control |= SSS_AES_CHAIN_MODE_CBC; |
Kamil Konieczny | c927b08 | 2018-02-07 16:52:09 +0100 | [diff] [blame] | 1919 | iv = req->info; |
Christoph Manszewski | cdf640a | 2018-09-17 17:09:30 +0200 | [diff] [blame] | 1920 | ctr = NULL; |
Kamil Konieczny | c927b08 | 2018-02-07 16:52:09 +0100 | [diff] [blame] | 1921 | } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) { |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1922 | aes_control |= SSS_AES_CHAIN_MODE_CTR; |
Christoph Manszewski | cdf640a | 2018-09-17 17:09:30 +0200 | [diff] [blame] | 1923 | iv = NULL; |
| 1924 | ctr = req->info; |
Kamil Konieczny | c927b08 | 2018-02-07 16:52:09 +0100 | [diff] [blame] | 1925 | } else { |
| 1926 | iv = NULL; /* AES_ECB */ |
Christoph Manszewski | cdf640a | 2018-09-17 17:09:30 +0200 | [diff] [blame] | 1927 | ctr = NULL; |
Kamil Konieczny | c927b08 | 2018-02-07 16:52:09 +0100 | [diff] [blame] | 1928 | } |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1929 | |
| 1930 | if (dev->ctx->keylen == AES_KEYSIZE_192) |
| 1931 | aes_control |= SSS_AES_KEY_SIZE_192; |
| 1932 | else if (dev->ctx->keylen == AES_KEYSIZE_256) |
| 1933 | aes_control |= SSS_AES_KEY_SIZE_256; |
| 1934 | |
| 1935 | aes_control |= SSS_AES_FIFO_MODE; |
| 1936 | |
| 1937 | 	/* alternatively, byte swapping could be done on the DMA side */ |
| 1938 | aes_control |= SSS_AES_BYTESWAP_DI |
| 1939 | | SSS_AES_BYTESWAP_DO |
| 1940 | | SSS_AES_BYTESWAP_IV |
| 1941 | | SSS_AES_BYTESWAP_KEY |
| 1942 | | SSS_AES_BYTESWAP_CNT; |
| 1943 | |
| 1944 | spin_lock_irqsave(&dev->lock, flags); |
| 1945 | |
| 1946 | SSS_WRITE(dev, FCINTENCLR, |
| 1947 | SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR); |
| 1948 | SSS_WRITE(dev, FCFIFOCTRL, 0x00); |
| 1949 | |
Krzysztof Kozlowski | 9e4a110 | 2016-03-22 10:58:24 +0900 | [diff] [blame] | 1950 | err = s5p_set_indata_start(dev, req); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1951 | if (err) |
| 1952 | goto indata_error; |
| 1953 | |
Krzysztof Kozlowski | 9e4a110 | 2016-03-22 10:58:24 +0900 | [diff] [blame] | 1954 | err = s5p_set_outdata_start(dev, req); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1955 | if (err) |
| 1956 | goto outdata_error; |
| 1957 | |
Naveen Krishna Chatradhi | 8924510 | 2014-05-08 21:58:14 +0800 | [diff] [blame] | 1958 | SSS_AES_WRITE(dev, AES_CONTROL, aes_control); |
Christoph Manszewski | cdf640a | 2018-09-17 17:09:30 +0200 | [diff] [blame] | 1959 | s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1960 | |
Krzysztof Kozlowski | 9e4a110 | 2016-03-22 10:58:24 +0900 | [diff] [blame] | 1961 | s5p_set_dma_indata(dev, dev->sg_src); |
| 1962 | s5p_set_dma_outdata(dev, dev->sg_dst); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1963 | |
| 1964 | SSS_WRITE(dev, FCINTENSET, |
| 1965 | SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET); |
| 1966 | |
| 1967 | spin_unlock_irqrestore(&dev->lock, flags); |
| 1968 | |
| 1969 | return; |
| 1970 | |
Krzysztof Kozlowski | 119c3ab | 2016-03-22 10:58:23 +0900 | [diff] [blame] | 1971 | outdata_error: |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1972 | s5p_unset_indata(dev); |
| 1973 | |
Krzysztof Kozlowski | 119c3ab | 2016-03-22 10:58:23 +0900 | [diff] [blame] | 1974 | indata_error: |
Krzysztof Kozlowski | 28b62b1 | 2017-03-08 23:14:20 +0200 | [diff] [blame] | 1975 | s5p_sg_done(dev); |
Krzysztof Kozlowski | 42d5c17 | 2017-03-17 16:49:19 +0200 | [diff] [blame] | 1976 | dev->busy = false; |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1977 | spin_unlock_irqrestore(&dev->lock, flags); |
Christoph Manszewski | 5842cd4 | 2018-09-17 17:09:27 +0200 | [diff] [blame] | 1978 | s5p_aes_complete(req, err); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1979 | } |
| 1980 | |
| 1981 | static void s5p_tasklet_cb(unsigned long data) |
| 1982 | { |
| 1983 | struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data; |
| 1984 | struct crypto_async_request *async_req, *backlog; |
| 1985 | struct s5p_aes_reqctx *reqctx; |
| 1986 | unsigned long flags; |
| 1987 | |
| 1988 | spin_lock_irqsave(&dev->lock, flags); |
| 1989 | backlog = crypto_get_backlog(&dev->queue); |
| 1990 | async_req = crypto_dequeue_request(&dev->queue); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1991 | |
Naveen Krishna Chatradhi | dc5e3f1 | 2014-05-08 21:58:15 +0800 | [diff] [blame] | 1992 | if (!async_req) { |
| 1993 | dev->busy = false; |
| 1994 | spin_unlock_irqrestore(&dev->lock, flags); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1995 | return; |
Naveen Krishna Chatradhi | dc5e3f1 | 2014-05-08 21:58:15 +0800 | [diff] [blame] | 1996 | } |
| 1997 | spin_unlock_irqrestore(&dev->lock, flags); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 1998 | |
| 1999 | if (backlog) |
| 2000 | backlog->complete(backlog, -EINPROGRESS); |
| 2001 | |
| 2002 | dev->req = ablkcipher_request_cast(async_req); |
| 2003 | dev->ctx = crypto_tfm_ctx(dev->req->base.tfm); |
| 2004 | reqctx = ablkcipher_request_ctx(dev->req); |
| 2005 | |
| 2006 | s5p_aes_crypt_start(dev, reqctx->mode); |
| 2007 | } |
| 2008 | |
| 2009 | static int s5p_aes_handle_req(struct s5p_aes_dev *dev, |
| 2010 | struct ablkcipher_request *req) |
| 2011 | { |
| 2012 | unsigned long flags; |
| 2013 | int err; |
| 2014 | |
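| | 	/* enqueue under the lock; only kick the tasklet if the engine was idle */ |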
| 2015 | spin_lock_irqsave(&dev->lock, flags); |
Naveen Krishna Chatradhi | dc5e3f1 | 2014-05-08 21:58:15 +0800 | [diff] [blame] | 2016 | err = ablkcipher_enqueue_request(&dev->queue, req); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2017 | if (dev->busy) { |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2018 | spin_unlock_irqrestore(&dev->lock, flags); |
Christoph Manszewski | b1b4416 | 2018-09-17 17:09:29 +0200 | [diff] [blame] | 2019 | return err; |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2020 | } |
| 2021 | dev->busy = true; |
| 2022 | |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2023 | spin_unlock_irqrestore(&dev->lock, flags); |
| 2024 | |
| 2025 | tasklet_schedule(&dev->tasklet); |
| 2026 | |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2027 | return err; |
| 2028 | } |
| 2029 | |
| 2030 | static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode) |
| 2031 | { |
Krzysztof Kozlowski | 5318c53 | 2016-05-27 13:49:40 +0200 | [diff] [blame] | 2032 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
| 2033 | struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req); |
| 2034 | struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); |
| 2035 | struct s5p_aes_dev *dev = ctx->dev; |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2036 | |
Christoph Manszewski | cdf640a | 2018-09-17 17:09:30 +0200 | [diff] [blame] | 2037 | if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) && |
| 2038 | ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) { |
Krzysztof Kozłowski | 313becd | 2016-01-11 20:45:50 +0900 | [diff] [blame] | 2039 | 		dev_err(dev->dev, "request size is not an exact number of AES blocks\n"); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2040 | return -EINVAL; |
| 2041 | } |
| 2042 | |
| 2043 | reqctx->mode = mode; |
| 2044 | |
| 2045 | return s5p_aes_handle_req(dev, req); |
| 2046 | } |
| 2047 | |
| 2048 | static int s5p_aes_setkey(struct crypto_ablkcipher *cipher, |
Christoph Manszewski | b1b4416 | 2018-09-17 17:09:29 +0200 | [diff] [blame] | 2049 | const u8 *key, unsigned int keylen) |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2050 | { |
Krzysztof Kozlowski | 5318c53 | 2016-05-27 13:49:40 +0200 | [diff] [blame] | 2051 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2052 | struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
| 2053 | |
| 2054 | if (keylen != AES_KEYSIZE_128 && |
| 2055 | keylen != AES_KEYSIZE_192 && |
| 2056 | keylen != AES_KEYSIZE_256) |
| 2057 | return -EINVAL; |
| 2058 | |
| 2059 | memcpy(ctx->aes_key, key, keylen); |
| 2060 | ctx->keylen = keylen; |
| 2061 | |
| 2062 | return 0; |
| 2063 | } |
| 2064 | |
| 2065 | static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req) |
| 2066 | { |
| 2067 | return s5p_aes_crypt(req, 0); |
| 2068 | } |
| 2069 | |
| 2070 | static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req) |
| 2071 | { |
| 2072 | return s5p_aes_crypt(req, FLAGS_AES_DECRYPT); |
| 2073 | } |
| 2074 | |
| 2075 | static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req) |
| 2076 | { |
| 2077 | return s5p_aes_crypt(req, FLAGS_AES_CBC); |
| 2078 | } |
| 2079 | |
| 2080 | static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req) |
| 2081 | { |
| 2082 | return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC); |
| 2083 | } |
| 2084 | |
Christoph Manszewski | cdf640a | 2018-09-17 17:09:30 +0200 | [diff] [blame] | 2085 | static int s5p_aes_ctr_crypt(struct ablkcipher_request *req) |
| 2086 | { |
| 2087 | return s5p_aes_crypt(req, FLAGS_AES_CTR); |
| 2088 | } |
| 2089 | |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2090 | static int s5p_aes_cra_init(struct crypto_tfm *tfm) |
| 2091 | { |
Krzysztof Kozłowski | 313becd | 2016-01-11 20:45:50 +0900 | [diff] [blame] | 2092 | 	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2093 | |
| 2094 | ctx->dev = s5p_dev; |
| 2095 | tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx); |
| 2096 | |
| 2097 | return 0; |
| 2098 | } |
| 2099 | |
| 2100 | static struct crypto_alg algs[] = { |
| 2101 | { |
| 2102 | .cra_name = "ecb(aes)", |
| 2103 | .cra_driver_name = "ecb-aes-s5p", |
| 2104 | .cra_priority = 100, |
| 2105 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
Nikos Mavrogiannopoulos | d912bb7 | 2011-11-01 13:39:56 +0100 | [diff] [blame] | 2106 | CRYPTO_ALG_ASYNC | |
| 2107 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2108 | .cra_blocksize = AES_BLOCK_SIZE, |
| 2109 | .cra_ctxsize = sizeof(struct s5p_aes_ctx), |
| 2110 | .cra_alignmask = 0x0f, |
| 2111 | .cra_type = &crypto_ablkcipher_type, |
| 2112 | .cra_module = THIS_MODULE, |
| 2113 | .cra_init = s5p_aes_cra_init, |
| 2114 | .cra_u.ablkcipher = { |
| 2115 | .min_keysize = AES_MIN_KEY_SIZE, |
| 2116 | .max_keysize = AES_MAX_KEY_SIZE, |
| 2117 | .setkey = s5p_aes_setkey, |
| 2118 | .encrypt = s5p_aes_ecb_encrypt, |
| 2119 | .decrypt = s5p_aes_ecb_decrypt, |
| 2120 | } |
| 2121 | }, |
| 2122 | { |
| 2123 | .cra_name = "cbc(aes)", |
| 2124 | .cra_driver_name = "cbc-aes-s5p", |
| 2125 | .cra_priority = 100, |
| 2126 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
Nikos Mavrogiannopoulos | d912bb7 | 2011-11-01 13:39:56 +0100 | [diff] [blame] | 2127 | CRYPTO_ALG_ASYNC | |
| 2128 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2129 | .cra_blocksize = AES_BLOCK_SIZE, |
| 2130 | .cra_ctxsize = sizeof(struct s5p_aes_ctx), |
| 2131 | .cra_alignmask = 0x0f, |
| 2132 | .cra_type = &crypto_ablkcipher_type, |
| 2133 | .cra_module = THIS_MODULE, |
| 2134 | .cra_init = s5p_aes_cra_init, |
| 2135 | .cra_u.ablkcipher = { |
| 2136 | .min_keysize = AES_MIN_KEY_SIZE, |
| 2137 | .max_keysize = AES_MAX_KEY_SIZE, |
| 2138 | .ivsize = AES_BLOCK_SIZE, |
| 2139 | .setkey = s5p_aes_setkey, |
| 2140 | .encrypt = s5p_aes_cbc_encrypt, |
| 2141 | .decrypt = s5p_aes_cbc_decrypt, |
| 2142 | } |
| 2143 | }, |
Christoph Manszewski | cdf640a | 2018-09-17 17:09:30 +0200 | [diff] [blame] | 2144 | { |
| 2145 | .cra_name = "ctr(aes)", |
| 2146 | .cra_driver_name = "ctr-aes-s5p", |
| 2147 | .cra_priority = 100, |
| 2148 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
| 2149 | CRYPTO_ALG_ASYNC | |
| 2150 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
| 2151 | .cra_blocksize = AES_BLOCK_SIZE, |
| 2152 | .cra_ctxsize = sizeof(struct s5p_aes_ctx), |
| 2153 | .cra_alignmask = 0x0f, |
| 2154 | .cra_type = &crypto_ablkcipher_type, |
| 2155 | .cra_module = THIS_MODULE, |
| 2156 | .cra_init = s5p_aes_cra_init, |
| 2157 | .cra_u.ablkcipher = { |
| 2158 | .min_keysize = AES_MIN_KEY_SIZE, |
| 2159 | .max_keysize = AES_MAX_KEY_SIZE, |
| 2160 | .ivsize = AES_BLOCK_SIZE, |
| 2161 | .setkey = s5p_aes_setkey, |
| 2162 | .encrypt = s5p_aes_ctr_crypt, |
| 2163 | .decrypt = s5p_aes_ctr_crypt, |
| 2164 | } |
| 2165 | }, |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2166 | }; |
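| | /* |
| |  * Illustrative only: in-kernel users would reach these through the generic |
| |  * API, e.g. crypto_alloc_ablkcipher("cbc(aes)", 0, 0), with the core |
| |  * selecting "cbc-aes-s5p" by cra_priority; user space typically goes |
| |  * through AF_ALG. |
| |  */ |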
| 2167 | |
| 2168 | static int s5p_aes_probe(struct platform_device *pdev) |
| 2169 | { |
Krzysztof Kozlowski | 5318c53 | 2016-05-27 13:49:40 +0200 | [diff] [blame] | 2170 | struct device *dev = &pdev->dev; |
| 2171 | int i, j, err = -ENODEV; |
Krzysztof Kozlowski | 6584eac | 2018-03-01 21:50:13 +0100 | [diff] [blame] | 2172 | const struct samsung_aes_variant *variant; |
Krzysztof Kozlowski | 5318c53 | 2016-05-27 13:49:40 +0200 | [diff] [blame] | 2173 | struct s5p_aes_dev *pdata; |
| 2174 | struct resource *res; |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 2175 | unsigned int hash_i; |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2176 | |
| 2177 | if (s5p_dev) |
| 2178 | return -EEXIST; |
| 2179 | |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2180 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); |
| 2181 | if (!pdata) |
| 2182 | return -ENOMEM; |
| 2183 | |
Naveen Krishna Chatradhi | 8924510 | 2014-05-08 21:58:14 +0800 | [diff] [blame] | 2184 | variant = find_s5p_sss_version(pdev); |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 2185 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 2186 | |
| 2187 | /* |
| 2188 | 	 * Note: HASH and PRNG use the same registers in secss; avoid |
| 2189 | 	 * overwriting each other. HASH is dropped when CONFIG_EXYNOS_RNG |
| 2190 | 	 * is enabled in the config. We need a larger region for the HASH |
| 2191 | 	 * registers in secss; the current resource describes only AES/DES |
| 2192 | */ |
| 2193 | if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) { |
| 2194 | if (variant == &exynos_aes_data) { |
| 2195 | res->end += 0x300; |
| 2196 | pdata->use_hash = true; |
| 2197 | } |
| 2198 | } |
| 2199 | |
| 2200 | pdata->res = res; |
| 2201 | pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res); |
| 2202 | if (IS_ERR(pdata->ioaddr)) { |
| 2203 | if (!pdata->use_hash) |
| 2204 | return PTR_ERR(pdata->ioaddr); |
| 2205 | /* try AES without HASH */ |
| 2206 | res->end -= 0x300; |
| 2207 | pdata->use_hash = false; |
| 2208 | pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res); |
| 2209 | if (IS_ERR(pdata->ioaddr)) |
| 2210 | return PTR_ERR(pdata->ioaddr); |
| 2211 | } |
Naveen Krishna Chatradhi | 8924510 | 2014-05-08 21:58:14 +0800 | [diff] [blame] | 2212 | |
Jingoo Han | 5c22ba6 | 2013-01-10 11:05:30 +0900 | [diff] [blame] | 2213 | pdata->clk = devm_clk_get(dev, "secss"); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2214 | if (IS_ERR(pdata->clk)) { |
| 2215 | dev_err(dev, "failed to find secss clock source\n"); |
| 2216 | return -ENOENT; |
| 2217 | } |
| 2218 | |
Naveen Krishna Chatradhi | c1eb7ef | 2014-05-08 21:58:15 +0800 | [diff] [blame] | 2219 | err = clk_prepare_enable(pdata->clk); |
| 2220 | if (err < 0) { |
| 2221 | dev_err(dev, "Enabling SSS clk failed, err %d\n", err); |
| 2222 | return err; |
| 2223 | } |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2224 | |
| 2225 | spin_lock_init(&pdata->lock); |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 2226 | spin_lock_init(&pdata->hash_lock); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2227 | |
Naveen Krishna Chatradhi | 8924510 | 2014-05-08 21:58:14 +0800 | [diff] [blame] | 2228 | pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset; |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 2229 | pdata->io_hash_base = pdata->ioaddr + variant->hash_offset; |
Naveen Krishna Chatradhi | 8924510 | 2014-05-08 21:58:14 +0800 | [diff] [blame] | 2230 | |
Naveen Krishna Chatradhi | 96fc70b | 2014-05-08 21:58:12 +0800 | [diff] [blame] | 2231 | pdata->irq_fc = platform_get_irq(pdev, 0); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2232 | if (pdata->irq_fc < 0) { |
| 2233 | err = pdata->irq_fc; |
| 2234 | dev_warn(dev, "feed control interrupt is not available.\n"); |
| 2235 | goto err_irq; |
| 2236 | } |
Krzysztof Kozlowski | 07de4bc | 2017-03-05 19:14:07 +0200 | [diff] [blame] | 2237 | err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL, |
| 2238 | s5p_aes_interrupt, IRQF_ONESHOT, |
| 2239 | pdev->name, pdev); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2240 | if (err < 0) { |
| 2241 | dev_warn(dev, "feed control interrupt is not available.\n"); |
| 2242 | goto err_irq; |
| 2243 | } |
| 2244 | |
Naveen Krishna Chatradhi | dc5e3f1 | 2014-05-08 21:58:15 +0800 | [diff] [blame] | 2245 | pdata->busy = false; |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2246 | pdata->dev = dev; |
| 2247 | platform_set_drvdata(pdev, pdata); |
| 2248 | s5p_dev = pdata; |
| 2249 | |
| 2250 | tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata); |
| 2251 | crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN); |
| 2252 | |
| 2253 | for (i = 0; i < ARRAY_SIZE(algs); i++) { |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2254 | err = crypto_register_alg(&algs[i]); |
| 2255 | if (err) |
| 2256 | goto err_algs; |
| 2257 | } |
| 2258 | |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 2259 | if (pdata->use_hash) { |
| 2260 | tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb, |
| 2261 | (unsigned long)pdata); |
| 2262 | crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH); |
| 2263 | |
| 2264 | for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256); |
| 2265 | hash_i++) { |
| 2266 | struct ahash_alg *alg; |
| 2267 | |
| 2268 | alg = &algs_sha1_md5_sha256[hash_i]; |
| 2269 | err = crypto_register_ahash(alg); |
| 2270 | if (err) { |
| 2271 | dev_err(dev, "can't register '%s': %d\n", |
| 2272 | alg->halg.base.cra_driver_name, err); |
| 2273 | goto err_hash; |
| 2274 | } |
| 2275 | } |
| 2276 | } |
| 2277 | |
Krzysztof Koz?owski | 313becd | 2016-01-11 20:45:50 +0900 | [diff] [blame] | 2278 | dev_info(dev, "s5p-sss driver registered\n"); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2279 | |
| 2280 | return 0; |
| 2281 | |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 2282 | err_hash: |
| 2283 | for (j = hash_i - 1; j >= 0; j--) |
| 2284 | crypto_unregister_ahash(&algs_sha1_md5_sha256[j]); |
| 2285 | |
| 2286 | tasklet_kill(&pdata->hash_tasklet); |
| 2287 | res->end -= 0x300; |
| 2288 | |
Krzysztof Kozlowski | 119c3ab | 2016-03-22 10:58:23 +0900 | [diff] [blame] | 2289 | err_algs: |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 2290 | if (i < ARRAY_SIZE(algs)) |
| 2291 | dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, |
| 2292 | err); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2293 | |
| 2294 | for (j = 0; j < i; j++) |
| 2295 | crypto_unregister_alg(&algs[j]); |
| 2296 | |
| 2297 | tasklet_kill(&pdata->tasklet); |
| 2298 | |
Krzysztof Kozlowski | 119c3ab | 2016-03-22 10:58:23 +0900 | [diff] [blame] | 2299 | err_irq: |
Naveen Krishna Chatradhi | c1eb7ef | 2014-05-08 21:58:15 +0800 | [diff] [blame] | 2300 | clk_disable_unprepare(pdata->clk); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2301 | |
| 2302 | s5p_dev = NULL; |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2303 | |
| 2304 | return err; |
| 2305 | } |
| 2306 | |
| 2307 | static int s5p_aes_remove(struct platform_device *pdev) |
| 2308 | { |
| 2309 | struct s5p_aes_dev *pdata = platform_get_drvdata(pdev); |
| 2310 | int i; |
| 2311 | |
| 2312 | if (!pdata) |
| 2313 | return -ENODEV; |
| 2314 | |
| 2315 | for (i = 0; i < ARRAY_SIZE(algs); i++) |
| 2316 | crypto_unregister_alg(&algs[i]); |
| 2317 | |
| 2318 | tasklet_kill(&pdata->tasklet); |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 2319 | if (pdata->use_hash) { |
| 2320 | for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--) |
| 2321 | crypto_unregister_ahash(&algs_sha1_md5_sha256[i]); |
| 2322 | |
| 2323 | pdata->res->end -= 0x300; |
| 2324 | tasklet_kill(&pdata->hash_tasklet); |
| 2325 | pdata->use_hash = false; |
| 2326 | } |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2327 | |
Naveen Krishna Chatradhi | c1eb7ef | 2014-05-08 21:58:15 +0800 | [diff] [blame] | 2328 | clk_disable_unprepare(pdata->clk); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2329 | s5p_dev = NULL; |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2330 | |
| 2331 | return 0; |
| 2332 | } |
| 2333 | |
| 2334 | static struct platform_driver s5p_aes_crypto = { |
| 2335 | .probe = s5p_aes_probe, |
| 2336 | .remove = s5p_aes_remove, |
| 2337 | .driver = { |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2338 | .name = "s5p-secss", |
Naveen Krishna Chatradhi | 6b9f16e | 2014-05-08 21:58:13 +0800 | [diff] [blame] | 2339 | .of_match_table = s5p_sss_dt_match, |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2340 | }, |
| 2341 | }; |
| 2342 | |
Axel Lin | 741e8c2 | 2011-11-26 21:26:19 +0800 | [diff] [blame] | 2343 | module_platform_driver(s5p_aes_crypto); |
Vladimir Zapolskiy | a49e490 | 2011-04-08 20:40:51 +0800 | [diff] [blame] | 2344 | |
| 2345 | MODULE_DESCRIPTION("S5PV210 AES hw acceleration support."); |
| 2346 | MODULE_LICENSE("GPL v2"); |
| 2347 | MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>"); |
Kamil Konieczny | c2afad6 | 2017-10-25 17:27:35 +0200 | [diff] [blame] | 2348 | MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>"); |