blob: 0d9509dff891d54439152c7378b3ac9983098104 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CBC: Cipher Block Chaining mode
 *
 * Copyright (c) 2006-2016 Herbert Xu <herbert@gondor.apana.org.au>
 */
7
Marcelo Cerrie6c2e652017-02-27 09:38:25 -03008#include <crypto/algapi.h>
Herbert Xu79c65d12016-11-22 20:08:39 +08009#include <crypto/internal/skcipher.h>
Herbert Xudb131ef2006-09-21 11:44:08 +100010#include <linux/err.h>
11#include <linux/init.h>
12#include <linux/kernel.h>
Herbert Xu50b65442007-11-20 17:36:00 +080013#include <linux/log2.h>
Herbert Xudb131ef2006-09-21 11:44:08 +100014#include <linux/module.h>
Herbert Xudb131ef2006-09-21 11:44:08 +100015
Herbert Xu5f254dd2020-09-01 21:49:11 +100016static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk,
17 struct crypto_skcipher *skcipher)
Herbert Xudb131ef2006-09-21 11:44:08 +100018{
Herbert Xu5f254dd2020-09-01 21:49:11 +100019 unsigned int bsize = crypto_skcipher_blocksize(skcipher);
20 void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
21 unsigned int nbytes = walk->nbytes;
22 u8 *src = walk->src.virt.addr;
23 u8 *dst = walk->dst.virt.addr;
24 struct crypto_cipher *cipher;
25 struct crypto_tfm *tfm;
26 u8 *iv = walk->iv;
27
28 cipher = skcipher_cipher_simple(skcipher);
29 tfm = crypto_cipher_tfm(cipher);
30 fn = crypto_cipher_alg(cipher)->cia_encrypt;
31
32 do {
33 crypto_xor(iv, src, bsize);
34 fn(tfm, dst, iv);
35 memcpy(iv, dst, bsize);
36
37 src += bsize;
38 dst += bsize;
39 } while ((nbytes -= bsize) >= bsize);
40
41 return nbytes;
42}
43
44static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk,
45 struct crypto_skcipher *skcipher)
46{
47 unsigned int bsize = crypto_skcipher_blocksize(skcipher);
48 void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
49 unsigned int nbytes = walk->nbytes;
50 u8 *src = walk->src.virt.addr;
51 struct crypto_cipher *cipher;
52 struct crypto_tfm *tfm;
53 u8 *iv = walk->iv;
54
55 cipher = skcipher_cipher_simple(skcipher);
56 tfm = crypto_cipher_tfm(cipher);
57 fn = crypto_cipher_alg(cipher)->cia_encrypt;
58
59 do {
60 crypto_xor(src, iv, bsize);
61 fn(tfm, src, src);
62 iv = src;
63
64 src += bsize;
65 } while ((nbytes -= bsize) >= bsize);
66
67 memcpy(walk->iv, iv, bsize);
68
69 return nbytes;
Herbert Xu79c65d12016-11-22 20:08:39 +080070}
71
72static int crypto_cbc_encrypt(struct skcipher_request *req)
73{
Herbert Xu5f254dd2020-09-01 21:49:11 +100074 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
Herbert Xu79c65d12016-11-22 20:08:39 +080075 struct skcipher_walk walk;
Herbert Xudb131ef2006-09-21 11:44:08 +100076 int err;
77
Herbert Xu79c65d12016-11-22 20:08:39 +080078 err = skcipher_walk_virt(&walk, req, false);
Herbert Xudb131ef2006-09-21 11:44:08 +100079
Herbert Xu79c65d12016-11-22 20:08:39 +080080 while (walk.nbytes) {
Herbert Xu5f254dd2020-09-01 21:49:11 +100081 if (walk.src.virt.addr == walk.dst.virt.addr)
82 err = crypto_cbc_encrypt_inplace(&walk, skcipher);
83 else
84 err = crypto_cbc_encrypt_segment(&walk, skcipher);
85 err = skcipher_walk_done(&walk, err);
86 }
87
88 return err;
89}
90
91static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
92 struct crypto_skcipher *skcipher)
93{
94 unsigned int bsize = crypto_skcipher_blocksize(skcipher);
95 void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
96 unsigned int nbytes = walk->nbytes;
97 u8 *src = walk->src.virt.addr;
98 u8 *dst = walk->dst.virt.addr;
99 struct crypto_cipher *cipher;
100 struct crypto_tfm *tfm;
101 u8 *iv = walk->iv;
102
103 cipher = skcipher_cipher_simple(skcipher);
104 tfm = crypto_cipher_tfm(cipher);
105 fn = crypto_cipher_alg(cipher)->cia_decrypt;
106
107 do {
108 fn(tfm, dst, src);
109 crypto_xor(dst, iv, bsize);
110 iv = src;
111
112 src += bsize;
113 dst += bsize;
114 } while ((nbytes -= bsize) >= bsize);
115
116 memcpy(walk->iv, iv, bsize);
117
118 return nbytes;
119}
120
/*
 * Decrypt a run of full blocks in place (src == dst).
 *
 * Because decryption overwrites the ciphertext that the *next* block's
 * XOR step would need, the buffer is processed backwards: each block is
 * decrypted, then XORed with the still-intact ciphertext of the block
 * before it.  The very last ciphertext block is stashed in last_iv first,
 * since it becomes the chaining IV for the following request chunk.
 *
 * Returns the number of trailing bytes (< block size) left unprocessed.
 */
static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk,
				      struct crypto_skcipher *skcipher)
{
	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 last_iv[MAX_CIPHER_BLOCKSIZE];
	struct crypto_cipher *cipher;
	struct crypto_tfm *tfm;

	cipher = skcipher_cipher_simple(skcipher);
	tfm = crypto_cipher_tfm(cipher);
	fn = crypto_cipher_alg(cipher)->cia_decrypt;

	/* Start of the last block (bsize is a power of 2, checked at
	 * instance creation, so the mask strips the partial tail). */
	src += nbytes - (nbytes & (bsize - 1)) - bsize;
	memcpy(last_iv, src, bsize);

	for (;;) {
		fn(tfm, src, src);
		if ((nbytes -= bsize) < bsize)
			break;
		/* XOR with the preceding (still-encrypted) block. */
		crypto_xor(src, src - bsize, bsize);
		src -= bsize;
	}

	/* First block chains from the request IV instead. */
	crypto_xor(src, walk->iv, bsize);
	memcpy(walk->iv, last_iv, bsize);

	return nbytes;
}
153
154static int crypto_cbc_decrypt(struct skcipher_request *req)
155{
156 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
157 struct skcipher_walk walk;
158 int err;
159
160 err = skcipher_walk_virt(&walk, req, false);
161
162 while (walk.nbytes) {
163 if (walk.src.virt.addr == walk.dst.virt.addr)
164 err = crypto_cbc_decrypt_inplace(&walk, skcipher);
165 else
166 err = crypto_cbc_decrypt_segment(&walk, skcipher);
Herbert Xu79c65d12016-11-22 20:08:39 +0800167 err = skcipher_walk_done(&walk, err);
Herbert Xudb131ef2006-09-21 11:44:08 +1000168 }
169
170 return err;
171}
172
Herbert Xu79c65d12016-11-22 20:08:39 +0800173static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
174{
175 struct skcipher_instance *inst;
Herbert Xudb131ef2006-09-21 11:44:08 +1000176 struct crypto_alg *alg;
Herbert Xuebc610e2007-01-01 18:37:02 +1100177 int err;
Herbert Xudb131ef2006-09-21 11:44:08 +1000178
Herbert Xub3c16bf2019-12-20 13:29:40 +0800179 inst = skcipher_alloc_instance_simple(tmpl, tb);
Eric Biggersa5a84a92019-01-03 20:16:15 -0800180 if (IS_ERR(inst))
181 return PTR_ERR(inst);
Herbert Xu79c65d12016-11-22 20:08:39 +0800182
Herbert Xub3c16bf2019-12-20 13:29:40 +0800183 alg = skcipher_ialg_simple(inst);
184
Herbert Xu79c65d12016-11-22 20:08:39 +0800185 err = -EINVAL;
Herbert Xu50b65442007-11-20 17:36:00 +0800186 if (!is_power_of_2(alg->cra_blocksize))
Eric Biggersa5a84a92019-01-03 20:16:15 -0800187 goto out_free_inst;
Herbert Xu50b65442007-11-20 17:36:00 +0800188
Herbert Xu79c65d12016-11-22 20:08:39 +0800189 inst->alg.encrypt = crypto_cbc_encrypt;
190 inst->alg.decrypt = crypto_cbc_decrypt;
Herbert Xudb131ef2006-09-21 11:44:08 +1000191
Herbert Xu79c65d12016-11-22 20:08:39 +0800192 err = skcipher_register_instance(tmpl, inst);
Herbert Xub3c16bf2019-12-20 13:29:40 +0800193 if (err) {
Eric Biggersa5a84a92019-01-03 20:16:15 -0800194out_free_inst:
Herbert Xub3c16bf2019-12-20 13:29:40 +0800195 inst->free(inst);
196 }
197
Herbert Xu79c65d12016-11-22 20:08:39 +0800198 return err;
Herbert Xudb131ef2006-09-21 11:44:08 +1000199}
200
/* Template descriptor for "cbc(...)" algorithm instantiation. */
static struct crypto_template crypto_cbc_tmpl = {
	.name = "cbc",
	.create = crypto_cbc_create,
	.module = THIS_MODULE,
};
206
/* Register the "cbc" template with the crypto API on module load. */
static int __init crypto_cbc_module_init(void)
{
	return crypto_register_template(&crypto_cbc_tmpl);
}
211
/* Unregister the "cbc" template on module unload. */
static void __exit crypto_cbc_module_exit(void)
{
	crypto_unregister_template(&crypto_cbc_tmpl);
}
216
/* Registered at subsys_initcall time so dependent algorithms can
 * instantiate cbc(...) during their own initialisation. */
subsys_initcall(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CBC block cipher mode of operation");
MODULE_ALIAS_CRYPTO("cbc");