// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers; it handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

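/*
 * Illustrative sketch only (not part of this file): this is roughly how a
 * mode implementation such as crypto/ecb.c drives the walker defined below.
 * "bsize", "ctx" and crypt_one_block() are placeholders for the cipher block
 * size, the transform context and a hypothetical per-block helper:
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *
 *	while ((nbytes = walk.nbytes)) {
 *		u8 *wsrc = walk.src.virt.addr;
 *		u8 *wdst = walk.dst.virt.addr;
 *
 *		do {
 *			crypt_one_block(ctx, wdst, wsrc);
 *			wsrc += bsize;
 *			wdst += bsize;
 *		} while ((nbytes -= bsize) >= bsize);
 *
 *		err = blkcipher_walk_done(desc, &walk, nbytes);
 *	}
 *
 *	return err;
 */
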
#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 * For example, with PAGE_SIZE 4096, start at page offset 0xff0 and len == 32,
 * the last byte would land on the following page, so the start of that next
 * page is returned instead of @start.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}

static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
				       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
}

static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
				       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

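/*
 * Finish the current walk step.  @err is either a negative errno from the
 * caller or the number of bytes left unprocessed in this step (normally
 * zero).  Returns a negative errno on failure; otherwise zero, with
 * walk->nbytes describing the next chunk when more data remains.
 */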
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
		blkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		blkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

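/*
 * Set up the next chunk of the walk: use the fast in-place mapping when
 * source and destination are suitably aligned, bounce the data through
 * walk->page when they are not, and fall back to the slow per-block
 * buffer when the usable length drops below one block.
 */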
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

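/*
 * Copy the IV into a correctly aligned scratch buffer when the one supplied
 * by the caller does not satisfy the algorithm's alignment mask.
 */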
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

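/*
 * Bounce an unaligned key through a temporary buffer that satisfies the
 * algorithm's alignment mask before handing it to the setkey callback.
 */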
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

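/*
 * Adapters that expose a synchronous blkcipher algorithm through the
 * asynchronous ablkcipher interface: each one builds a blkcipher_desc on
 * the stack and completes the request before returning.
 */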
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

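/*
 * For the synchronous interface, reserve aligned room for the IV right
 * behind the transform context; crypto_init_blkcipher_ops_sync() points
 * crt->iv at this area.
 */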
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

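/*
 * When the type mask pins the transform down to the synchronous blkcipher
 * type, install the sync ops; otherwise expose the algorithm through the
 * async adapters above.
 */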
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : <default>\n");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");