/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
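
/*
 * A worked example of the rounding above, assuming 4 KiB pages
 * (PAGE_MASK == ~0xfffUL); the addresses are illustrative only:
 *
 *	start = 0x1ff8, len = 16:
 *		end_page = (0x1ff8 + 15) & ~0xfff = 0x2007 & ~0xfff = 0x2000
 *		max(0x1ff8, 0x2000) = 0x2000
 *
 * A chunk that would straddle the 0x2000 page boundary is moved up to the
 * start of the next page, while a chunk that already fits within its page
 * (end_page <= start) stays where it is.
 */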

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes - err;
	unsigned int nbytes;

	nbytes = walk->total - n;

	if (unlikely(err < 0)) {
		nbytes = 0;
		n = 0;
	} else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
					   SKCIPHER_WALK_SLOW |
					   SKCIPHER_WALK_COPY |
					   SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (WARN_ON(err)) {
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = nbytes;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
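
/*
 * A minimal sketch (not part of this file) of how a cipher implementation
 * typically drives the walk above.  process_blocks() and bsize are
 * hypothetical placeholders for the real per-block transform and block
 * size; the point illustrated is the convention of passing the number of
 * bytes left unprocessed back to skcipher_walk_done():
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes - (walk.nbytes % bsize);
 *
 *		process_blocks(walk.dst.virt.addr, walk.src.virt.addr,
 *			       n, walk.iv);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 */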

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
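
/*
 * A concrete illustration of the encrypt/decrypt asymmetry above, assuming
 * a 16-byte authentication tag (e.g. GCM): for an encrypt request with
 * req->cryptlen == 64 the walk covers all 64 plaintext bytes, while for
 * the matching decrypt request req->cryptlen == 80 (ciphertext plus tag)
 * and the walk covers only 80 - 16 = 64 bytes, leaving the tag untouched
 * for the AEAD implementation's own verification step.
 */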

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_blkcipher_type)
		return sizeof(struct crypto_blkcipher *);

	if (alg->cra_type == &crypto_ablkcipher_type ||
	    alg->cra_type == &crypto_givcipher_type)
		return sizeof(struct crypto_ablkcipher *);

	return crypto_alg_extsize(alg);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_blkcipher *blkcipher = *ctx;
	int err;

	crypto_blkcipher_clear_flags(blkcipher, ~0);
	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
				       CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};

	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;

	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->keysize = calg->cra_blkcipher.max_keysize;

	if (skcipher->keysize)
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);

	return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	if (skcipher->keysize)
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);

	return 0;
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (err)
		return err;

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
		return crypto_init_skcipher_ops_ablkcipher(tfm);

	skcipher->setkey = skcipher_setkey;
	skcipher->encrypt = alg->encrypt;
	skcipher->decrypt = alg->decrypt;
	skcipher->ivsize = alg->ivsize;
	skcipher->keysize = alg->max_keysize;

	if (skcipher->keysize)
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type2;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
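
/*
 * A minimal caller-side sketch (not part of this file), assuming "cbc(aes)"
 * and caller-provided key, iv and scatterlists; error unwinding is
 * abbreviated:
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      NULL, NULL);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */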

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
				   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type2;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);
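
/*
 * A minimal registration sketch (not part of this file): a driver declares
 * an array of skcipher_alg entries and registers them all at once, relying
 * on the partial-failure unwinding implemented above.  The names, sizes and
 * callbacks below are illustrative placeholders:
 *
 *	static struct skcipher_alg xxx_algs[] = { {
 *		.base.cra_name		= "cbc(xxx)",
 *		.base.cra_blocksize	= XXX_BLOCK_SIZE,
 *		.min_keysize		= XXX_MIN_KEY_SIZE,
 *		.max_keysize		= XXX_MAX_KEY_SIZE,
 *		.ivsize			= XXX_BLOCK_SIZE,
 *		.setkey			= xxx_setkey,
 *		.encrypt		= xxx_cbc_encrypt,
 *		.decrypt		= xxx_cbc_decrypt,
 *	} };
 *
 *	static int __init xxx_init(void)
 *	{
 *		return crypto_register_skciphers(xxx_algs,
 *						 ARRAY_SIZE(xxx_algs));
 *	}
 *
 *	static void __exit xxx_exit(void)
 *	{
 *		crypto_unregister_skciphers(xxx_algs, ARRAY_SIZE(xxx_algs));
 *	}
 */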

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");