/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "mv_cesa.h"

#define MV_CESA	"MV-CESA:"
#define MAX_HW_HASH_SIZE	0xFFFF

/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 *  IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *                          |    more scatter entries
 *                          \________________/
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it: sg iterator for src
 * @dst_sg_it: sg iterator for dst
 * @sg_src_left: bytes left in src to process (scatter list)
 * @src_start: offset to add to src start position (scatter list)
 * @crypt_len: length of current hw crypt/hash process
 * @hw_nbytes: total bytes to process in hw for this request
 * @copy_back: whether to copy data back (crypt) or not (hash)
 * @sg_dst_left: bytes left in dst to process in this scatter list
 * @dst_start: offset to add to dst start position (scatter list)
 * @hw_processed_bytes: number of bytes processed by hw (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of
 * the SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;
	void (*complete) (void);
	void (*process) (int is_first);

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	int hw_nbytes;
	/* dst mostly */
	int copy_back;
	int sg_dst_left;
	int dst_start;
	int hw_processed_bytes;
};

struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct crypto_async_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
	int has_sha1;
	int has_hmac_sha1;
};

static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

enum hash_op {
	COP_SHA1,
	COP_HMAC_SHA1
};

struct mv_tfm_hash_ctx {
	struct crypto_shash *fallback;
	struct crypto_shash *base_hash;
	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
	int count_add;
	enum hash_op op;
};

struct mv_req_hash_ctx {
	u64 count;
	u32 state[SHA1_DIGEST_SIZE / 4];
	u8 buffer[SHA1_BLOCK_SIZE];
	int first_hash;		/* marks that we don't have previous state */
	int last_chunk;		/* marks that this is the 'final' request */
	int extra_bytes;	/* unprocessed bytes in buffer */
	enum hash_op op;
	int count_add;
};

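/*
 * Derive the AES decryption key from the tail of the expanded encryption
 * key schedule; this is the key material loaded into SRAM whenever
 * req_ctx->decrypt is set. The result is cached until the key changes.
 */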
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
		       4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}

static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
	return 0;
}

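/*
 * Copy @len bytes from the current position in the source scatterlist
 * into @dbuf, pulling in further scatterlist entries via the mapping
 * iterator as they are consumed.
 */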
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
	int ret;
	void *sbuf;
	int copy_len;

	while (len) {
		if (!p->sg_src_left) {
			ret = sg_miter_next(&p->src_sg_it);
			BUG_ON(!ret);
			p->sg_src_left = p->src_sg_it.length;
			p->src_start = 0;
		}

		sbuf = p->src_sg_it.addr + p->src_start;

		copy_len = min(p->sg_src_left, len);
		memcpy(dbuf, sbuf, copy_len);

		p->src_start += copy_len;
		p->sg_src_left -= copy_len;

		len -= copy_len;
		dbuf += copy_len;
	}
}

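/*
 * Stage as much of the remaining request as fits into the SRAM input
 * buffer, appending after any data already queued (p->crypt_len).
 */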
static void setup_data_in(void)
{
	struct req_progress *p = &cpg->p;
	int data_in_sram =
	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
			data_in_sram - p->crypt_len);
	p->crypt_len = data_in_sram;
}

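/*
 * Program the engine for the next chunk of the current cipher request:
 * the key (and, for CBC, the IV on the first block) and the input data
 * are copied into SRAM, the op descriptor is written and the accelerator
 * is started.
 */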
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
		       AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
		       AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
	       sizeof(struct sec_accel_config));

	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}

static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	sg_miter_stop(&cpg->p.src_sg_it);
	sg_miter_stop(&cpg->p.dst_sg_it);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}

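/*
 * Program the engine for the next chunk of the current hash request.
 * The fragment mode (NOT/FIRST/MID/LAST_FRAG) is chosen from the
 * position within the request, and for continuation fragments the
 * intermediate digest is written back to the engine first.
 */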
static void mv_process_hash_current(int first_block)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct req_progress *p = &cpg->p;
	struct sec_accel_config op = { 0 };
	int is_last;

	switch (req_ctx->op) {
	case COP_SHA1:
	default:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
		break;
	case COP_HMAC_SHA1:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
		memcpy(cpg->sram + SRAM_HMAC_IV_IN,
		       tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
		break;
	}

	op.mac_src_p =
		MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
		MAC_SRC_TOTAL_LEN((u32)req_ctx->count);

	setup_data_in();

	op.mac_digest =
		MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
	op.mac_iv =
		MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);

	is_last = req_ctx->last_chunk
		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
		&& (req_ctx->count <= MAX_HW_HASH_SIZE);
	if (req_ctx->first_hash) {
		if (is_last)
			op.config |= CFG_NOT_FRAG;
		else
			op.config |= CFG_FIRST_FRAG;

		req_ctx->first_hash = 0;
	} else {
		if (is_last)
			op.config |= CFG_LAST_FRAG;
		else
			op.config |= CFG_MID_FRAG;

		writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
		writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
		writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
		writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
		writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
	}

	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));

	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}

static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
					  struct shash_desc *desc)
{
	int i;
	struct sha1_state shash_state;

	shash_state.count = ctx->count + ctx->count_add;
	for (i = 0; i < 5; i++)
		shash_state.state[i] = ctx->state[i];
	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
	return crypto_shash_import(desc, &shash_state);
}

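/*
 * Finish the digest in software using the fallback shash: either from
 * the buffered bytes when nothing has been hashed in hardware yet, or
 * from the exported intermediate state otherwise.
 */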
static int mv_hash_final_fallback(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
	} desc;
	int rc;

	desc.shash.tfm = tfm_ctx->fallback;
	desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	if (unlikely(req_ctx->first_hash)) {
		crypto_shash_init(&desc.shash);
		crypto_shash_update(&desc.shash, req_ctx->buffer,
				    req_ctx->extra_bytes);
	} else {
		/* only SHA1 for now.... */
		rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
		if (rc)
			goto out;
	}
	rc = crypto_shash_final(&desc.shash, req->result);
out:
	return rc;
}

static void mv_hash_algo_completion(void)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	if (ctx->extra_bytes)
		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
	sg_miter_stop(&cpg->p.src_sg_it);

	if (likely(ctx->last_chunk)) {
		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
			       crypto_ahash_digestsize(crypto_ahash_reqtfm
						       (req)));
		} else
			mv_hash_final_fallback(req);
	} else {
		ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
		ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
		ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
		ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
		ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
	}
}

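/*
 * Runs in the queue thread once the engine has signalled completion of a
 * chunk: copy the output back to the destination scatterlist (cipher
 * requests only), then either start the next chunk or complete the
 * request.
 */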
static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;

	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
	if (cpg->p.copy_back) {
		int need_copy_len = cpg->p.crypt_len;
		int sram_offset = 0;
		do {
			int dst_copy;

			if (!cpg->p.sg_dst_left) {
				ret = sg_miter_next(&cpg->p.dst_sg_it);
				BUG_ON(!ret);
				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
				cpg->p.dst_start = 0;
			}

			buf = cpg->p.dst_sg_it.addr;
			buf += cpg->p.dst_start;

			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

			memcpy(buf,
			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
			       dst_copy);
			sram_offset += dst_copy;
			cpg->p.sg_dst_left -= dst_copy;
			need_copy_len -= dst_copy;
			cpg->p.dst_start += dst_copy;
		} while (need_copy_len > 0);
	}

	cpg->p.crypt_len = 0;

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		cpg->p.process(0);
	} else {
		cpg->p.complete();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}

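/* Number of scatterlist entries needed to cover @total_bytes. */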
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;
	size_t cur_len;

	while (sl) {
		cur_len = sl[i].length;
		++i;
		if (total_bytes > cur_len)
			total_bytes -= cur_len;
		else
			break;
	}

	return i;
}

static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
	struct req_progress *p = &cpg->p;
	int num_sgs;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	p->hw_nbytes = req->nbytes;
	p->complete = mv_crypto_algo_completion;
	p->process = mv_process_current_q;
	p->copy_back = 1;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

	mv_process_current_q(1);
}

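/*
 * Start a new hash request: data that the hardware can process now is
 * described in cpg->p, while a trailing partial block may be kept in
 * ctx->buffer for a later update/final. If nothing is left for the
 * hardware, the request is completed here directly (via the fallback
 * for a final chunk).
 */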
static void mv_start_new_hash_req(struct ahash_request *req)
{
	struct req_progress *p = &cpg->p;
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
	int num_sgs, hw_bytes, old_extra_bytes, rc;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	hw_bytes = req->nbytes + ctx->extra_bytes;
	old_extra_bytes = ctx->extra_bytes;

	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
	if (ctx->extra_bytes != 0
	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
		hw_bytes -= ctx->extra_bytes;
	else
		ctx->extra_bytes = 0;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	if (hw_bytes) {
		p->hw_nbytes = hw_bytes;
		p->complete = mv_hash_algo_completion;
		p->process = mv_process_hash_current;

		if (unlikely(old_extra_bytes)) {
			memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
			       old_extra_bytes);
			p->crypt_len = old_extra_bytes;
		}

		mv_process_hash_current(1);
	} else {
		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
				ctx->extra_bytes - old_extra_bytes);
		sg_miter_stop(&p->src_sg_it);
		if (ctx->last_chunk)
			rc = mv_hash_final_fallback(req);
		else
			rc = 0;
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->base.complete(&req->base, rc);
		local_bh_enable();
	}
}

static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			if (async_req->tfm->__crt_alg->cra_type !=
			    &crypto_ahash_type) {
				struct ablkcipher_request *req =
				    ablkcipher_request_cast(async_req);
				mv_start_new_crypt_req(req);
			} else {
				struct ahash_request *req =
				    ahash_request_cast(async_req);
				mv_start_new_hash_req(req);
			}
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}

static int mv_handle_req(struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}

static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
				 int is_last, unsigned int req_len,
				 int count_add)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->op = op;
	ctx->count = req_len;
	ctx->first_hash = 1;
	ctx->last_chunk = is_last;
	ctx->count_add = count_add;
}

static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
				   unsigned req_len)
{
	ctx->last_chunk = is_last;
	ctx->count += req_len;
}

static int mv_hash_init(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
			     tfm_ctx->count_add);
	return 0;
}

static int mv_hash_update(struct ahash_request *req)
{
	if (!req->nbytes)
		return 0;

	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
	return mv_handle_req(&req->base);
}

static int mv_hash_final(struct ahash_request *req)
{
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	mv_update_hash_req_ctx(ctx, 1, 0);
	return mv_handle_req(&req->base);
}

static int mv_hash_finup(struct ahash_request *req)
{
	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
	return mv_handle_req(&req->base);
}

static int mv_hash_digest(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
			     req->nbytes, tfm_ctx->count_add);
	return mv_handle_req(&req->base);
}

static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
			     const void *ostate)
{
	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
	int i;

	for (i = 0; i < 5; i++) {
		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
	}
}

static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	int rc;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	int bs, ds, ss;

	if (!ctx->base_hash)
		return 0;

	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
	if (rc)
		return rc;

	/* Can't see a way to extract the ipad/opad from the fallback tfm
	   so I'm basically copying code from the hmac module */
	bs = crypto_shash_blocksize(ctx->base_hash);
	ds = crypto_shash_digestsize(ctx->base_hash);
	ss = crypto_shash_statesize(ctx->base_hash);

	{
		struct {
			struct shash_desc shash;
			char ctx[crypto_shash_descsize(ctx->base_hash)];
		} desc;
		unsigned int i;
		char ipad[ss];
		char opad[ss];

		desc.shash.tfm = ctx->base_hash;
		desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
		    CRYPTO_TFM_REQ_MAY_SLEEP;

		if (keylen > bs) {
			int err;

			err =
			    crypto_shash_digest(&desc.shash, key, keylen, ipad);
			if (err)
				return err;

			keylen = ds;
		} else
			memcpy(ipad, key, keylen);

		memset(ipad + keylen, 0, bs - keylen);
		memcpy(opad, ipad, bs);

		for (i = 0; i < bs; i++) {
			ipad[i] ^= 0x36;
			opad[i] ^= 0x5c;
		}

		rc = crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, ipad, bs) ? :
		    crypto_shash_export(&desc.shash, ipad) ? :
		    crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, opad, bs) ? :
		    crypto_shash_export(&desc.shash, opad);

		if (rc == 0)
			mv_hash_init_ivs(ctx, ipad, opad);

		return rc;
	}
}

static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
			    enum hash_op op, int count_add)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm = NULL;
	struct crypto_shash *base_hash = NULL;
	int err = -ENOMEM;

	ctx->op = op;
	ctx->count_add = count_add;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING MV_CESA
		       "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}
	ctx->fallback = fallback_tfm;

	if (base_hash_name) {
		/* Allocate a hash to compute the ipad/opad of hmac. */
		base_hash = crypto_alloc_shash(base_hash_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(base_hash)) {
			printk(KERN_WARNING MV_CESA
			       "Base driver '%s' could not be loaded!\n",
			       base_hash_name);
			err = PTR_ERR(base_hash);
			goto err_bad_base;
		}
	}
	ctx->base_hash = base_hash;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_req_hash_ctx) +
				 crypto_shash_descsize(ctx->fallback));
	return 0;
err_bad_base:
	crypto_free_shash(fallback_tfm);
out:
	return err;
}

static void mv_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
	if (ctx->base_hash)
		crypto_free_shash(ctx->base_hash);
}

static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
}

static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}

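/*
 * Engine interrupt: acknowledge the accelerator-done condition and let
 * the queue thread take over dequeueing.
 */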
irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}

struct crypto_alg mv_aes_alg_ecb = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 16,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_ecb,
			.decrypt = mv_dec_aes_ecb,
		},
	},
};

struct crypto_alg mv_aes_alg_cbc = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_cbc,
			.decrypt = mv_dec_aes_cbc,
		},
	},
};

struct ahash_alg mv_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags =
			    CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			.cra_init = mv_cra_hash_sha1_init,
			.cra_exit = mv_cra_hash_exit,
			.cra_module = THIS_MODULE,
		}
	}
};

struct ahash_alg mv_hmac_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.setkey = mv_hash_setkey,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags =
			    CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			.cra_init = mv_cra_hash_hmac_sha1_init,
			.cra_exit = mv_cra_hash_exit,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, resource_size(res));
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = resource_size(res);
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			  cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-ecb driver\n");
		goto err_irq;
	}

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-cbc driver\n");
		goto err_unreg_ecb;
	}

	ret = crypto_register_ahash(&mv_sha1_alg);
	if (ret == 0)
		cpg->has_sha1 = 1;
	else
		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");

	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
	if (ret == 0) {
		cpg->has_hmac_sha1 = 1;
	} else {
		printk(KERN_WARNING MV_CESA
		       "Could not register hmac-sha1 driver\n");
	}

	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	if (cp->has_sha1)
		crypto_unregister_ahash(&mv_sha1_alg);
	if (cp->has_hmac_sha1)
		crypto_unregister_ahash(&mv_hmac_sha1_alg);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}

static struct platform_driver marvell_crypto = {
	.probe = mv_probe,
	.remove = mv_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");

static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");