/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

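/*
 * Added note: cryptd wraps an existing (usually synchronous) algorithm in an
 * asynchronous instance.  Each instance context below carries a spawn for the
 * wrapped algorithm plus a pointer to the single global cryptd_queue, which in
 * turn holds one crypto_queue and one work item per possible CPU, so requests
 * are queued and processed on the CPU that submitted them.
 */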
struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	atomic_t refcnt;
	struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

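/*
 * Added note: queue a request on the current CPU's queue and kick that CPU's
 * work item.  When the transform's refcount is in use (non-zero, i.e. the tfm
 * was obtained via the cryptd_alloc_* helpers), an extra reference is taken
 * here so the tfm cannot be freed while the request sits on the queue; the
 * matching drop happens in the per-type completion helpers.
 */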
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/*
 * Called in workqueue context; process one queued request for real (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable keeps us from being preempted
	 * by cryptd_enqueue_request() on this CPU, and local_bh_disable/enable
	 * keeps cryptd_enqueue_request() from running in softirq context
	 * while the queue is being manipulated.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

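/*
 * Added note: allocate @head bytes of private data, a crypto_instance and
 * @tail bytes in one block, initialise the embedded instance from @alg, and
 * return a pointer to the start of the whole block (the head), not to the
 * instance itself.
 */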
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_sync_skcipher *child = ctx->child;
	int err;

	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(child,
				       crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_sync_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent,
				  crypto_sync_skcipher_get_flags(child) &
				  CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

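/*
 * Added note: worker-side encrypt handler.  The caller's completion is saved
 * in the request context; build a synchronous sub-request on the stack for
 * the child skcipher, run it directly, restore the original completion and
 * report the result through cryptd_skcipher_complete().
 */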
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

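/*
 * Added note: submission-side helper.  Remember the caller's completion in
 * the request context, substitute the worker callback (encrypt or decrypt)
 * and push the request onto the per-CPU queue.
 */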
static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = (struct crypto_sync_skcipher *)cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

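/*
 * Added note: asynchronous hash support.  Each cryptd ahash wraps a
 * synchronous shash child; the child's shash_desc lives in the ahash request
 * context, so per-request hash state survives the queue/worker round trip
 * and can be exported and imported by the caller.
 */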
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(salg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

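/*
 * Added note: AEAD support.  setkey and setauthsize are forwarded straight to
 * the child transform; encrypt/decrypt go through the queue and are executed
 * by cryptd_aead_crypt() in the worker, which retargets the original request
 * at the child with aead_request_set_tfm().  That is why the request size is
 * the maximum of cryptd's own context and the child's request size.
 */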
static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

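/*
 * Added note: template entry point.  Instantiating "cryptd(<alg>)" dispatches
 * on the type of the wrapped algorithm and builds the matching skcipher,
 * ahash or AEAD instance around the single global per-CPU queue above.
 */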
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

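/*
 * Added note: helper API for callers that want to drive cryptd directly
 * instead of allocating "cryptd(...)" by name.  A minimal usage sketch
 * (illustrative only; "__ecb-aes-example" is a made-up driver name standing
 * in for an algorithm registered with CRYPTO_ALG_INTERNAL):
 *
 *	struct cryptd_skcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_skcipher("__ecb-aes-example",
 *				     CRYPTO_ALG_INTERNAL,
 *				     CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *
 *	...submit skcipher requests on &ctfm->base, or reach the synchronous
 *	child via cryptd_skcipher_child(ctfm)...
 *
 *	cryptd_free_skcipher(ctfm);
 */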
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");