// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

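/*
 * One deferred write-back unit for the slow walk path: @data holds
 * @len bytes of output that still need to be copied out to the
 * destination scatterlist position saved in @dst.
 */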
struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

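/* Copy one queued bounce buffer out to its saved destination. */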
static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

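/*
 * Flush and free all pending slow-path buffers.  This runs once the
 * asynchronous operation has produced its output, so that bounced
 * data finally lands in the caller's destination pages.
 */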
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

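/* Remember the current output position so @p can be written back later. */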
static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

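/*
 * Walk the output scatterlist forward past @n just-written bytes,
 * moving on to the next scatterlist entry each time the current
 * page is used up.
 */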
static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
					unsigned int n)
{
	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}
}

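/* Fast path: data was processed in place, just advance both walks. */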
static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
					unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

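/*
 * ablkcipher_walk_done - finish one step of an ablkcipher walk
 * @err: on entry, the number of bytes of this step the driver left
 *	 unprocessed (0 when everything was consumed), or a negative errno
 *
 * Advances the walk, writes back any bounced output, and either sets up
 * the next step or tears the walk down, copying the final IV back into
 * the request.  Returns 0 on completion, a negative error, or the result
 * of setting up the next step.
 */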
int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
		ablkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		ablkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);
	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

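/*
 * Slow path: the current chunk is misaligned or split across pages.
 * Bounce one block through a freshly allocated, suitably aligned
 * buffer; the result is queued and copied back to the real
 * destination when the walk step completes.
 */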
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

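/*
 * The caller's IV is not aligned for this algorithm: copy it into an
 * aligned scratch buffer that does not straddle a page.  walk->iv is
 * redirected there; ablkcipher_walk_done() copies it back at the end.
 */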
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

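/* Fast path: hand the driver the source and destination pages directly. */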
static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

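/*
 * Set up the next chunk for the driver: clamp the walk to what is
 * contiguous in both scatterlists, then take the fast path if source
 * and destination are aligned, or bounce a single block otherwise.
 */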
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

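/*
 * First step of a walk: refuse to run in hard IRQ context, bounce the
 * IV to an aligned buffer if necessary, and start both scatterlist
 * walks.
 */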
static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

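/*
 * Entry point for drivers that want physical (page + offset) addresses.
 * A minimal caller sketch, assuming the walk was set up with
 * ablkcipher_walk_init() and that the driver maps the pages itself
 * (mapping and cleanup omitted):
 *
 *	struct ablkcipher_walk walk;
 *	int err;
 *
 *	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *	err = ablkcipher_walk_phys(req, &walk);
 *	while (!err && walk.nbytes) {
 *		// process walk.nbytes bytes from walk.src.page/offset
 *		// into walk.dst.page/offset, then report 0 bytes left
 *		err = ablkcipher_walk_done(req, &walk, 0);
 *	}
 */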
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

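/*
 * Bounce an unaligned key through an aligned heap buffer, then wipe
 * the copy before freeing it so no key material is left behind.
 */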
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

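/*
 * Validate the key length against the algorithm's limits before
 * delegating to the cipher's own ->setkey(), bouncing through an
 * aligned buffer if the caller's key is misaligned.
 */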
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

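/*
 * Wire up the per-transform operations when an ablkcipher tfm is
 * instantiated.  IVs larger than PAGE_SIZE / 8 are rejected as a
 * sanity limit.
 */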
static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

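/* Describe the algorithm over the crypto_user netlink interface. */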
#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

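/* /proc/crypto output for ablkcipher algorithms. */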
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : <default>\n");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);