// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

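/*
 * Internal state flags for a struct skcipher_walk.  PHYS selects the
 * physical-address (async) walk variant; SLOW, COPY and DIFF record how the
 * current chunk was mapped or buffered; SLEEP is set when the walk is
 * allowed to sleep on allocations.
 */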
enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

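/*
 * Deferred write-back buffer used by the physical (async) walk variant:
 * queued on walk->buffers and flushed out to @dst by
 * skcipher_walk_complete().
 */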
struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

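/*
 * Called by the walk's user after processing each chunk: @err is the number
 * of bytes left unprocessed (zero on full success) or a negative error code.
 * Flushes/unmaps the current chunk, advances the scatterlists and either
 * starts the next step of the walk or finishes up.
 */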
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

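/*
 * Flush the write-back buffers queued during a physical (async) walk and
 * release them, then finalise the IV and free the walk's scratch memory.
 * On error the pending writes are discarded.
 */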
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

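/*
 * Slow path: the next @bsize bytes straddle a scatterlist boundary, so
 * bounce them through an aligned temporary buffer that both src and dst
 * point into.  For physical walks the buffer is queued so the result can
 * be written back later by skcipher_walk_complete().
 */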
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

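/*
 * Copy path: the data is contiguous but misaligned, so stage the source in
 * walk->page and process it in place there; skcipher_walk_done() (or the
 * queued write-back for physical walks) copies the result out afterwards.
 */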
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

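/*
 * Fast path: src and dst are sufficiently aligned, so map the scatterlist
 * pages directly.  If src and dst differ (i.e. the operation is not
 * in-place), map both and mark the walk SKCIPHER_WALK_DIFF so that both
 * get unmapped later.
 */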
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

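/*
 * Set up the next chunk of the walk: clamp the chunk to the current src/dst
 * scatterlist entries and pick the slow, copy or fast strategy accordingly.
 */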
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

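/*
 * The caller's IV buffer does not satisfy the algorithm's alignmask, so
 * allocate an aligned scratch buffer and work on a copy of the IV there.
 */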
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

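/*
 * Start the walk proper: refuse hard-IRQ context, make sure the IV is
 * usable (copying it if misaligned) and hand off to skcipher_walk_next().
 */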
static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

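/*
 * Begin a virtual-address walk over an skcipher request.  With @atomic set
 * the walk will not sleep even if the request allows it.
 */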
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
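
/*
 * A typical caller loop looks roughly like this (a sketch, not code from
 * this file; AES_BLOCK_SIZE stands in for the algorithm's block size):
 *
 *	struct skcipher_walk walk;
 *	unsigned int nbytes;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while ((nbytes = walk.nbytes) != 0) {
 *		... process nbytes - (nbytes % AES_BLOCK_SIZE) bytes from
 *		    walk.src.virt.addr into walk.dst.virt.addr ...
 *		err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
 *	}
 *	return err;
 */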

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

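/*
 * Shared setup for AEAD walks: skip over the associated data in both
 * scatterlists so the walk covers only the ciphertext/plaintext area.
 */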
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

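/*
 * Validate the key length, bounce the key through an aligned buffer if
 * necessary, and update the NEED_KEY flag based on the outcome.
 */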
int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

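/*
 * Top-level encrypt/decrypt entry points: refuse to run without a key and
 * account the operation in the crypto statistics.
 */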
int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_skcipher_alg(tfm)->encrypt(req);
	crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_skcipher_alg(tfm)->decrypt(req);
	crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

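/*
 * Sanity-check the algorithm and fill in the defaults (chunksize, walksize,
 * type flags) shared by algorithm and instance registration.
 */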
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

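/*
 * Default ->setkey(), ->init() and ->exit() implementations for the
 * "simple" template instances created by skcipher_alloc_instance_simple():
 * they just forward to the underlying crypto_cipher.
 */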
static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);