/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	atomic_t refcnt;
	struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

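/*
 * One crypto_queue per possible CPU, each drained by its own work item
 * on that CPU; all queues share the same maximum backlog length.
 */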
static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

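/*
 * Add a request to the current CPU's queue and kick that CPU's worker.
 * refcnt is the first member of every cryptd context structure, which
 * is why the tfm context can be treated as a bare atomic_t here.  The
 * count is only maintained for tfms handed out by cryptd_alloc_*()
 * (those start at one); otherwise it stays zero and is left alone.
 */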
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/*
 * Called in workqueue context; do one unit of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable is used to prevent being
	 * preempted by cryptd_enqueue_request().  local_bh_disable/enable
	 * is used to prevent cryptd_enqueue_request() being accessed from
	 * software interrupts.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}
196
Stephan Mueller466a7b92015-03-30 21:57:06 +0200197static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
198 u32 *mask)
199{
200 struct crypto_attr_type *algt;
201
202 algt = crypto_get_attr_type(tb);
203 if (IS_ERR(algt))
204 return;
Herbert Xuf6da3202015-07-09 07:17:19 +0800205
Herbert Xu5e4b8c12015-08-13 17:29:06 +0800206 *type |= algt->type & CRYPTO_ALG_INTERNAL;
207 *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
Stephan Mueller466a7b92015-03-30 21:57:06 +0200208}
209
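/*
 * Fill in the common instance fields: the driver name becomes
 * "cryptd(<child driver name>)" and the priority is raised by 50 so
 * the cryptd version is preferred over the bare child algorithm in
 * lookups.
 */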
static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_sync_skcipher *child = ctx->child;
	int err;

	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(child,
				       crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_sync_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent,
				  crypto_sync_skcipher_get_flags(child) &
				  CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

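/*
 * The real work happens here in the worker: replay the request against
 * the synchronous child cipher using an on-stack subrequest, which is
 * zeroed afterwards so no request state is left behind on the stack.
 */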
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = (struct crypto_sync_skcipher *)cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

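/*
 * Instantiate cryptd(<skcipher>).  The child is looked up with type = 0
 * and CRYPTO_ALG_ASYNC in the mask, i.e. the ASYNC bit must be clear,
 * so the spawned cipher is synchronous; that is what makes the cast to
 * crypto_sync_skcipher in cryptd_skcipher_init_tfm() safe.
 */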
static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

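/*
 * Run the caller's completion with softirqs disabled, then drop the
 * per-request tfm reference taken in cryptd_enqueue_request().  If it
 * was the last reference, the tfm is freed here rather than in
 * cryptd_free_ahash().
 */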
static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

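/*
 * Instantiate cryptd(<shash>) as an asynchronous ahash.  setkey is only
 * wired up when the underlying shash actually has one, so keyless
 * hashes do not gain a setkey they should not have.
 */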
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(salg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

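/*
 * Common tail for AEAD encrypt/decrypt: redirect the request to the
 * child tfm, run the requested operation, then invoke the caller's
 * completion with softirqs disabled and drop the per-request tfm
 * reference taken in cryptd_enqueue_request().
 */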
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

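/*
 * Public helpers for callers that need direct access to a cryptd tfm
 * (for example to reach the synchronous child).  Each cryptd_alloc_*
 * checks that the algorithm it got back really is a cryptd instance
 * from this module, and initialises the context refcount to one; that
 * base reference is dropped by the matching cryptd_free_*().
 */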
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;

	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");