/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Symmetric key ciphers.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_INTERNAL_SKCIPHER_H
#define _CRYPTO_INTERNAL_SKCIPHER_H

#include <crypto/algapi.h>
#include <crypto/skcipher.h>
#include <linux/list.h>
#include <linux/types.h>

struct aead_request;
struct rtattr;

struct skcipher_instance {
	void (*free)(struct skcipher_instance *inst);
	union {
		struct {
			char head[offsetof(struct skcipher_alg, base)];
			struct crypto_instance base;
		} s;
		struct skcipher_alg alg;
	};
};

struct crypto_skcipher_spawn {
	struct crypto_spawn base;
};
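
/*
 * State for walking the source and destination scatterlists of a request in
 * chunks that the cipher implementation can process directly.
 */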
struct skcipher_walk {
	union {
		struct {
			struct page *page;
			unsigned long offset;
		} phys;

		struct {
			u8 *page;
			void *addr;
		} virt;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;

	struct scatter_walk out;
	unsigned int total;

	struct list_head buffers;

	u8 *page;
	u8 *buffer;
	u8 *oiv;
	void *iv;

	unsigned int ivsize;

	int flags;
	unsigned int blocksize;
	unsigned int stride;
	unsigned int alignmask;
};

static inline struct crypto_instance *skcipher_crypto_instance(
	struct skcipher_instance *inst)
{
	return &inst->s.base;
}

static inline struct skcipher_instance *skcipher_alg_instance(
	struct crypto_skcipher *skcipher)
{
	return container_of(crypto_skcipher_alg(skcipher),
			    struct skcipher_instance, alg);
}

static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
{
	return crypto_instance_ctx(skcipher_crypto_instance(inst));
}

static inline void skcipher_request_complete(struct skcipher_request *req, int err)
{
	req->base.complete(&req->base, err);
}

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask);
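
/*
 * A template typically grabs the underlying skcipher while building an
 * instance, e.g. (a rough sketch only, not taken from this header; error
 * handling omitted):
 *
 *	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
 *
 *	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
 *				   crypto_attr_alg_name(tb[1]), 0, mask);
 *
 * and releases it again with crypto_drop_skcipher() when the instance is
 * freed.
 */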

static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
	crypto_drop_spawn(&spawn->base);
}

static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
	struct crypto_skcipher_spawn *spawn)
{
	return container_of(spawn->base.alg, struct skcipher_alg, base);
}

static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
	struct crypto_skcipher_spawn *spawn)
{
	return crypto_skcipher_spawn_alg(spawn);
}

static inline struct crypto_skcipher *crypto_spawn_skcipher(
	struct crypto_skcipher_spawn *spawn)
{
	return crypto_spawn_tfm2(&spawn->base);
}
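
/*
 * Set the size of the per-request context; usually called from the
 * transform's ->init() callback so that skcipher_request_ctx() provides
 * enough room for the implementation's per-request state.
 */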
static inline void crypto_skcipher_set_reqsize(
	struct crypto_skcipher *skcipher, unsigned int reqsize)
{
	skcipher->reqsize = reqsize;
}

int crypto_register_skcipher(struct skcipher_alg *alg);
void crypto_unregister_skcipher(struct skcipher_alg *alg);
int crypto_register_skciphers(struct skcipher_alg *algs, int count);
void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst);

int skcipher_walk_done(struct skcipher_walk *walk, int err);
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req,
		       bool atomic);
void skcipher_walk_atomise(struct skcipher_walk *walk);
int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic);
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic);
void skcipher_walk_complete(struct skcipher_walk *walk, int err);
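
/*
 * Typical use of the walk API from an skcipher ->encrypt() or ->decrypt()
 * implementation (a minimal sketch only; my_do_blocks() and ctx are
 * placeholders, not part of this header):
 *
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes;
 *
 *		if (n < walk.total)
 *			n = round_down(n, walk.stride);
 *
 *		my_do_blocks(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 *			     n, walk.iv);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */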

static inline void skcipher_walk_abort(struct skcipher_walk *walk)
{
	skcipher_walk_done(walk, -ECANCELED);
}

static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *skcipher_request_ctx(struct skcipher_request *req)
{
	return req->__ctx;
}

static inline u32 skcipher_request_flags(struct skcipher_request *req)
{
	return req->base.flags;
}

static inline unsigned int crypto_skcipher_alg_min_keysize(
	struct skcipher_alg *alg)
{
	return alg->min_keysize;
}

static inline unsigned int crypto_skcipher_alg_max_keysize(
	struct skcipher_alg *alg)
{
	return alg->max_keysize;
}

static inline unsigned int crypto_skcipher_alg_walksize(
	struct skcipher_alg *alg)
{
	return alg->walksize;
}

/**
 * crypto_skcipher_walksize() - obtain walk size
 * @tfm: cipher handle
 *
 * In some cases, algorithms can only perform optimally when operating on
 * multiple blocks in parallel. This is reflected by the walksize, which
 * must be a multiple of the chunksize (or equal if the concern does not
 * apply).
 *
 * Return: walk size in bytes
 */
static inline unsigned int crypto_skcipher_walksize(
	struct crypto_skcipher *tfm)
{
	return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
}

/* Helpers for simple block cipher modes of operation */
struct skcipher_ctx_simple {
	struct crypto_cipher *cipher;	/* underlying block cipher */
};
static inline struct crypto_cipher *
skcipher_cipher_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	return ctx->cipher;
}

struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb);
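
/*
 * A "simple" mode template can build its instance around a single underlying
 * block cipher with skcipher_alloc_instance_simple().  A rough sketch of a
 * ->create() hook (my_mode_encrypt/my_mode_decrypt are placeholders, not
 * part of this header):
 *
 *	static int my_mode_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = my_mode_encrypt;
 *		inst->alg.decrypt = my_mode_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */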

static inline struct crypto_alg *skcipher_ialg_simple(
	struct skcipher_instance *inst)
{
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);

	return crypto_spawn_cipher_alg(spawn);
}

#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */