// SPDX-License-Identifier: GPL-2.0-or-later
/* XTS: as defined in IEEE1619/D16
 * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

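/* Per-transform context: 'child' is the ecb(cipher) instance that processes
 * the data blocks with Key1, 'tweak' is the single-block cipher keyed with
 * Key2 that turns the IV into the initial tweak. */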
struct priv {
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
};

struct xts_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	char name[CRYPTO_MAX_ALG_NAME];
};

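/* Per-request context: the current tweak value 't', scratch scatterlists used
 * to address the trailing partial block for ciphertext stealing, and the
 * sub-request handed to the ecb(cipher) child. */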
struct rctx {
	le128 t;
	struct scatterlist *tail;
	struct scatterlist sg[2];
	struct skcipher_request subreq;
};

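/* The XTS key is the concatenation Key1 || Key2.  After xts_verify_key()
 * has checked the combined key, the second half keys the tweak cipher and
 * the first half keys the ecb(cipher) data path. */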
static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
	int err;

	err = xts_verify_key(parent, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	tweak = ctx->tweak;
	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	/* data cipher, uses Key1 i.e. the first half of *key */
	child = ctx->child;
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);

	return err;
}

/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the gf128mul_x_ble() calls again.
 */
static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	le128 t = rctx->t;
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}
	err = skcipher_walk_virt(&w, req, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wsrc;
		le128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			if (unlikely(cts) &&
			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
				if (!enc) {
					if (second_pass)
						rctx->t = t;
					gf128mul_x_ble(&t, &t);
				}
				le128_xor(wdst, &t, wsrc);
				if (enc && second_pass)
					gf128mul_x_ble(&rctx->t, &t);
				skcipher_walk_done(&w, avail - bs);
				return 0;
			}

			le128_xor(wdst++, &t, wsrc++);
			gf128mul_x_ble(&t, &t);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

static int xor_tweak_pre(struct skcipher_request *req, bool enc)
{
	return xor_tweak(req, false, enc);
}

static int xor_tweak_post(struct skcipher_request *req, bool enc)
{
	return xor_tweak(req, true, enc);
}

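/* Completion callback for the ciphertext stealing sub-request issued by
 * cts_final(): XOR the freshly processed final block with the tweak and
 * store it back in place. */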
static void cts_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	le128 b;

	if (!err) {
		struct rctx *rctx = skcipher_request_ctx(req);

		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
		le128_xor(&b, &rctx->t, &b);
		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
	}

	skcipher_request_complete(req, err);
}

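/* Ciphertext stealing for a request whose length is not a multiple of the
 * block size: combine the leftover input bytes with the tail of the last
 * processed block to form one more full block, XOR it with the tweak, run it
 * through the child cipher and XOR the result with the tweak again (either
 * here or in cts_done() if the child completes asynchronously). */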
static int cts_final(struct skcipher_request *req,
		     int (*crypt)(struct skcipher_request *req))
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int tail = req->cryptlen % XTS_BLOCK_SIZE;
	le128 b[2];
	int err;

	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
				      offset - XTS_BLOCK_SIZE);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	memcpy(b + 1, b, tail);
	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);

	le128_xor(b, &rctx->t, b);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
				   XTS_BLOCK_SIZE, NULL);

	err = crypt(subreq);
	if (err)
		return err;

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	le128_xor(b, &rctx->t, b);
	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);

	return 0;
}

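/* Completion callbacks for an asynchronous bulk pass through the ecb(cipher)
 * child: apply the second tweak pass and, if the request length is not block
 * aligned, finish the trailing bytes with ciphertext stealing. */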
static void encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct rctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xor_tweak_post(req, true);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = cts_final(req, crypto_skcipher_encrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct rctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xor_tweak_post(req, false);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = cts_final(req, crypto_skcipher_decrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

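/* Set up the sub-request covering all full blocks and compute the initial
 * tweak T by encrypting the IV with the tweak cipher.  Requests shorter than
 * one block cannot be handled and are rejected. */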
static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);

	/* calculate first value of T */
	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

	return 0;
}

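/* encrypt()/decrypt(): pre-XOR the tweak into the data, run the bulk
 * ecb(cipher) pass over the full blocks, XOR the tweak back in, and hand any
 * trailing partial block to cts_final(). */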
static int encrypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = init_crypt(req, encrypt_done) ?:
	      xor_tweak_pre(req, true) ?:
	      crypto_skcipher_encrypt(subreq) ?:
	      xor_tweak_post(req, true);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return cts_final(req, crypto_skcipher_encrypt);
}

static int decrypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = init_crypt(req, decrypt_done) ?:
	      xor_tweak_pre(req, false) ?:
	      crypto_skcipher_decrypt(subreq) ?:
	      xor_tweak_post(req, false);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return cts_final(req, crypto_skcipher_decrypt);
}

static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;

	child = crypto_spawn_skcipher(&ictx->spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;

	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
	if (IS_ERR(tweak)) {
		crypto_free_skcipher(ctx->child);
		return PTR_ERR(tweak);
	}

	ctx->tweak = tweak;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
					 sizeof(struct rctx));

	return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

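/* Instantiate "xts(cipher)": grab an ecb(cipher) skcipher for the data path,
 * remember the bare cipher name for the tweak cipher allocated in init_tfm(),
 * and advertise doubled key sizes since the XTS key is Key1 || Key2. */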
static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct xts_instance_ctx *ctx;
	struct skcipher_alg *alg;
	const char *cipher_name;
	u32 mask;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));

	mask = crypto_requires_off(algt->type, algt->mask,
				   CRYPTO_ALG_NEED_FALLBACK |
				   CRYPTO_ALG_ASYNC);

	err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(&ctx->spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
		goto err_drop_spawn;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
		if (len < 2 || len >= sizeof(ctx->name))
			goto err_drop_spawn;

		if (ctx->name[len - 1] != ')')
			goto err_drop_spawn;

		ctx->name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_drop_spawn;
		}
	} else
		goto err_drop_spawn;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = XTS_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(&ctx->spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_tmpl = {
	.name = "xts",
	.create = create,
	.module = THIS_MODULE,
};
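
/*
 * Illustrative usage sketch (not part of this file, names chosen as an
 * example): a caller would normally instantiate this template through the
 * crypto API, e.g.
 *
 *	struct crypto_skcipher *tfm;
 *	u8 key[64];	(Key1 || Key2, here two AES-256 keys)
 *
 *	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
 *	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
 *
 * and then drive it with skcipher_request_set_crypt() and
 * crypto_skcipher_encrypt(), passing a 16-byte IV that carries the tweak
 * (typically the sector number).
 */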

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

subsys_initcall(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");