/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

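/*
 * Internal walk state flags.  Roughly: PHYS marks an async walk that works
 * on page/offset pairs instead of mapped virtual addresses; SLOW means the
 * current chunk was bounced through a temporary block because it straddles
 * scatterlist entries; COPY means the data is being staged through
 * walk->page to satisfy alignment; DIFF means source and destination were
 * mapped separately; SLEEP allows blocking allocations.
 */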
enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
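/*
 * end_page is the start of the page that would contain the last byte of the
 * region; if start + len crosses into that page, returning end_page keeps
 * the whole len-byte region within a single page, otherwise start is
 * returned unchanged.
 */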
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
}

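/*
 * skcipher_walk_done() is called by the cipher implementation after it has
 * processed the chunk that the walker exposed.  @err is either a negative
 * error code, or the number of bytes of that chunk which were left
 * unprocessed (normally 0).  Any bounced data is written back, the
 * scatterlists are advanced, and the next chunk is mapped; walk->nbytes
 * reads zero once the walk has ended.
 */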
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		skcipher_done_slow(walk, n);
		goto already_advanced;
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
already_advanced:
	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;

	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

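/*
 * Slow path: the next chunk is smaller than the block size, typically
 * because it straddles scatterlist entries, so it is bounced through a
 * freshly allocated buffer that is guaranteed to be suitably aligned and
 * to sit within one page.  For async (PHYS) walks the buffer is queued so
 * that skcipher_walk_complete() can copy the result back later.
 */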
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
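		/*
		 * (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1 is the mask for the
		 * largest power of two dividing sizeof(*p), i.e. the best
		 * alignment that buffer[], placed at the end of *p, can be
		 * assumed to have.
		 */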
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

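/*
 * Copy path: the chunk is large enough but the source or destination is
 * not suitably aligned, so the data is staged through the page at
 * walk->page.  For async (PHYS) walks the writeback is queued via
 * skcipher_queue_write() instead of happening in skcipher_walk_done().
 */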
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

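/*
 * Fast path: source and destination are large enough and properly aligned,
 * so they are used in place.  If they differ, both are mapped and
 * SKCIPHER_WALK_DIFF is set so that skcipher_walk_done() unmaps both.
 */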
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

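/*
 * Pick the size of the next chunk (at most walk->stride, at least the
 * block size where possible) and select the fast, copy or slow path for
 * exposing it to the caller.
 */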
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

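/*
 * The caller's IV is not aligned for this algorithm, so copy it into an
 * allocated buffer that is; skcipher_walk_done()/_complete() copy it back
 * to the original location (walk->oiv) when the walk ends.
 */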
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

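/*
 * Illustrative sketch of how a synchronous cipher implementation would
 * typically drive this walker; my_cipher_crypt(), ctx and bsize are
 * hypothetical placeholders, not part of this file:
 *
 *	struct skcipher_walk walk;
 *	int err = skcipher_walk_virt(&walk, req, false);
 *
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes - (walk.nbytes % bsize);
 *
 *		my_cipher_crypt(ctx, walk.dst.virt.addr,
 *				walk.src.virt.addr, n);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */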
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

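/*
 * The AEAD variants walk only the ciphertext/plaintext: the associated
 * data (req->assoclen bytes) is skipped in both scatterlists before the
 * walk starts, and for decryption the authentication tag is excluded from
 * walk->total.
 */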
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

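/*
 * Compatibility layer: the code below lets the skcipher API be backed by
 * algorithms that still implement the legacy blkcipher or ablkcipher
 * interfaces, by allocating the legacy transform and forwarding setkey,
 * encrypt and decrypt to it.
 */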
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_blkcipher_type)
		return sizeof(struct crypto_blkcipher *);

	if (alg->cra_type == &crypto_ablkcipher_type ||
	    alg->cra_type == &crypto_givcipher_type)
		return sizeof(struct crypto_ablkcipher *);

	return crypto_alg_extsize(alg);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_blkcipher *blkcipher = *ctx;
	int err;

	crypto_blkcipher_clear_flags(blkcipher, ~0);
	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
				       CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};

	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
					CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;

	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->keysize = calg->cra_blkcipher.max_keysize;

	if (skcipher->keysize)
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);

	return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	if (skcipher->keysize)
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);

	return 0;
}

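/*
 * Native skcipher setkey: keys that do not satisfy the algorithm's
 * alignmask are bounced through a temporary heap buffer, which is wiped
 * (kzfree) afterwards since it held key material.
 */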
static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (err)
		return err;

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
		return crypto_init_skcipher_ops_ablkcipher(tfm);

	skcipher->setkey = skcipher_setkey;
	skcipher->encrypt = alg->encrypt;
	skcipher->decrypt = alg->decrypt;
	skcipher->ivsize = alg->ivsize;
	skcipher->keysize = alg->max_keysize;

	if (skcipher->keysize)
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type2;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
				   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

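/*
 * Defaults applied below: chunksize falls back to the block size and
 * walksize to the chunksize, so only algorithms that want a different
 * walk granularity need to set them explicitly.
 */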
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type2;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");