// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * The following concept of the memory management is used:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendpage/sendmsg. Filling
 * up the TX SGL does not cause a crypto operation -- the data will only be
 * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
 * provide a buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request is released. The extracted TX SGL parts are released together with
 * the RX SGL release.
 */
25
26#include <crypto/scatterwalk.h>
27#include <crypto/skcipher.h>
28#include <crypto/if_alg.h>
29#include <linux/init.h>
30#include <linux/list.h>
31#include <linux/kernel.h>
32#include <linux/mm.h>
33#include <linux/module.h>
34#include <linux/net.h>
35#include <net/sock.h>
36
Ying Xue1b784142015-03-02 15:37:48 +080037static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
38 size_t size)
Herbert Xu8ff59092010-10-19 21:31:55 +080039{
40 struct sock *sk = sock->sk;
41 struct alg_sock *ask = alg_sk(sk);
Herbert Xu6454c2b2016-02-03 21:39:26 +080042 struct sock *psk = ask->parent;
43 struct alg_sock *pask = alg_sk(psk);
Eric Biggersf8d33fa2018-01-03 11:16:29 -080044 struct crypto_skcipher *tfm = pask->private;
Herbert Xu0d96e4b2015-12-18 19:16:57 +080045 unsigned ivsize = crypto_skcipher_ivsize(tfm);
Herbert Xu8ff59092010-10-19 21:31:55 +080046
Stephan Mueller2d975912017-08-02 07:56:19 +020047 return af_alg_sendmsg(sock, msg, size, ivsize);
Tadeusz Struka5969992015-03-19 12:31:40 -070048}
49
Stephan Muellere8704562017-06-25 17:12:39 +020050static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
51 size_t ignored, int flags)
Tadeusz Struka5969992015-03-19 12:31:40 -070052{
53 struct sock *sk = sock->sk;
54 struct alg_sock *ask = alg_sk(sk);
Herbert Xuec69bbf2016-02-03 21:39:24 +080055 struct sock *psk = ask->parent;
56 struct alg_sock *pask = alg_sk(psk);
Stephan Mueller2d975912017-08-02 07:56:19 +020057 struct af_alg_ctx *ctx = ask->private;
Eric Biggersf8d33fa2018-01-03 11:16:29 -080058 struct crypto_skcipher *tfm = pask->private;
Stephan Muellere8704562017-06-25 17:12:39 +020059 unsigned int bs = crypto_skcipher_blocksize(tfm);
Stephan Mueller2d975912017-08-02 07:56:19 +020060 struct af_alg_async_req *areq;
Stephan Muellere8704562017-06-25 17:12:39 +020061 int err = 0;
62 size_t len = 0;
Herbert Xuec69bbf2016-02-03 21:39:24 +080063
Stephan Mueller11edb552017-11-29 12:02:23 +010064 if (!ctx->used) {
65 err = af_alg_wait_for_data(sk, flags);
66 if (err)
67 return err;
68 }
69
Stephan Muellere8704562017-06-25 17:12:39 +020070 /* Allocate cipher request for current operation. */
Stephan Mueller2d975912017-08-02 07:56:19 +020071 areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
72 crypto_skcipher_reqsize(tfm));
73 if (IS_ERR(areq))
74 return PTR_ERR(areq);
Herbert Xuec69bbf2016-02-03 21:39:24 +080075
Stephan Muellere8704562017-06-25 17:12:39 +020076 /* convert iovecs of output buffers into RX SGL */
Stephan Mueller2d975912017-08-02 07:56:19 +020077 err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len);
78 if (err)
79 goto free;
Tadeusz Struka5969992015-03-19 12:31:40 -070080
Stephan Muellere8704562017-06-25 17:12:39 +020081 /* Process only as much RX buffers for which we have TX data */
82 if (len > ctx->used)
83 len = ctx->used;
tadeusz.struk@intel.com033f46b2015-04-01 13:53:06 -070084
Stephan Muellere8704562017-06-25 17:12:39 +020085 /*
86 * If more buffers are to be expected to be processed, process only
87 * full block size buffers.
88 */
89 if (ctx->more || len < ctx->used)
90 len -= len % bs;
91
92 /*
93 * Create a per request TX SGL for this request which tracks the
94 * SG entries from the global TX SGL.
95 */
Stephan Mueller2d975912017-08-02 07:56:19 +020096 areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
Stephan Muellere8704562017-06-25 17:12:39 +020097 if (!areq->tsgl_entries)
98 areq->tsgl_entries = 1;
Kees Cook76e43e32018-06-12 14:28:11 -070099 areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
100 areq->tsgl_entries),
Stephan Muellere8704562017-06-25 17:12:39 +0200101 GFP_KERNEL);
102 if (!areq->tsgl) {
103 err = -ENOMEM;
104 goto free;
105 }
106 sg_init_table(areq->tsgl, areq->tsgl_entries);
Stephan Mueller2d975912017-08-02 07:56:19 +0200107 af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
Stephan Muellere8704562017-06-25 17:12:39 +0200108
109 /* Initialize the crypto operation */
Stephan Mueller2d975912017-08-02 07:56:19 +0200110 skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
111 skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
112 areq->first_rsgl.sgl.sg, len, ctx->iv);
Stephan Muellere8704562017-06-25 17:12:39 +0200113
114 if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
115 /* AIO operation */
Stephan Mueller7d2c3f52017-11-10 13:20:55 +0100116 sock_hold(sk);
Stephan Muellere8704562017-06-25 17:12:39 +0200117 areq->iocb = msg->msg_iocb;
Stephan Muellerd53c5132017-12-08 11:50:37 +0100118
119 /* Remember output size that will be generated. */
120 areq->outlen = len;
121
Stephan Mueller2d975912017-08-02 07:56:19 +0200122 skcipher_request_set_callback(&areq->cra_u.skcipher_req,
Stephan Muellere8704562017-06-25 17:12:39 +0200123 CRYPTO_TFM_REQ_MAY_SLEEP,
Stephan Mueller2d975912017-08-02 07:56:19 +0200124 af_alg_async_cb, areq);
125 err = ctx->enc ?
126 crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
127 crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
Stephan Mueller7d2c3f52017-11-10 13:20:55 +0100128
129 /* AIO operation in progress */
Stephan Muellerd53c5132017-12-08 11:50:37 +0100130 if (err == -EINPROGRESS || err == -EBUSY)
Stephan Mueller7d2c3f52017-11-10 13:20:55 +0100131 return -EIOCBQUEUED;
Stephan Mueller7d2c3f52017-11-10 13:20:55 +0100132
133 sock_put(sk);
Stephan Muellere8704562017-06-25 17:12:39 +0200134 } else {
135 /* Synchronous operation */
Stephan Mueller2d975912017-08-02 07:56:19 +0200136 skcipher_request_set_callback(&areq->cra_u.skcipher_req,
Stephan Muellere8704562017-06-25 17:12:39 +0200137 CRYPTO_TFM_REQ_MAY_SLEEP |
138 CRYPTO_TFM_REQ_MAY_BACKLOG,
Gilad Ben-Yossef2c3f8b12017-10-18 08:00:39 +0100139 crypto_req_done, &ctx->wait);
140 err = crypto_wait_req(ctx->enc ?
Stephan Mueller2d975912017-08-02 07:56:19 +0200141 crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
142 crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
Gilad Ben-Yossef2c3f8b12017-10-18 08:00:39 +0100143 &ctx->wait);
Stephan Muellere8704562017-06-25 17:12:39 +0200144 }
145
Herbert Xu8ff59092010-10-19 21:31:55 +0800146
Stephan Muellere8704562017-06-25 17:12:39 +0200147free:
Stephan Mueller7d2c3f52017-11-10 13:20:55 +0100148 af_alg_free_resources(areq);
Herbert Xu8ff59092010-10-19 21:31:55 +0800149
Stephan Muellere8704562017-06-25 17:12:39 +0200150 return err ? err : len;
Herbert Xu8ff59092010-10-19 21:31:55 +0800151}
152
Tadeusz Struka5969992015-03-19 12:31:40 -0700153static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
154 size_t ignored, int flags)
155{
Stephan Muellere8704562017-06-25 17:12:39 +0200156 struct sock *sk = sock->sk;
157 int ret = 0;
158
159 lock_sock(sk);
160 while (msg_data_left(msg)) {
161 int err = _skcipher_recvmsg(sock, msg, ignored, flags);
162
163 /*
164 * This error covers -EIOCBQUEUED which implies that we can
165 * only handle one AIO request. If the caller wants to have
166 * multiple AIO requests in parallel, he must make multiple
167 * separate AIO calls.
Stephan Mueller5703c822017-07-30 14:31:18 +0200168 *
169 * Also return the error if no data has been processed so far.
Stephan Muellere8704562017-06-25 17:12:39 +0200170 */
171 if (err <= 0) {
Stephan Mueller5703c822017-07-30 14:31:18 +0200172 if (err == -EIOCBQUEUED || !ret)
Stephan Muellere8704562017-06-25 17:12:39 +0200173 ret = err;
174 goto out;
175 }
176
177 ret += err;
178 }
179
180out:
Stephan Mueller2d975912017-08-02 07:56:19 +0200181 af_alg_wmem_wakeup(sk);
Stephan Muellere8704562017-06-25 17:12:39 +0200182 release_sock(sk);
183 return ret;
Tadeusz Struka5969992015-03-19 12:31:40 -0700184}
Herbert Xu8ff59092010-10-19 21:31:55 +0800185
/*
 * Socket operations installed once a key has been set on the parent tfm
 * (see skcipher_accept_parent()).  Unsupported socket calls map to the
 * sock_no_* stubs; data transfer goes through the handlers above and the
 * shared af_alg helpers.
 */
static struct proto_ops algif_skcipher_ops = {
        .family         =       PF_ALG,

        .connect        =       sock_no_connect,
        .socketpair     =       sock_no_socketpair,
        .getname        =       sock_no_getname,
        .ioctl          =       sock_no_ioctl,
        .listen         =       sock_no_listen,
        .shutdown       =       sock_no_shutdown,
        .getsockopt     =       sock_no_getsockopt,
        .mmap           =       sock_no_mmap,
        .bind           =       sock_no_bind,
        .accept         =       sock_no_accept,
        .setsockopt     =       sock_no_setsockopt,

        .release        =       af_alg_release,
        .sendmsg        =       skcipher_sendmsg,
        .sendpage       =       af_alg_sendpage,
        .recvmsg        =       skcipher_recvmsg,
        .poll           =       af_alg_poll,
};
207
/*
 * Verify that the parent tfm has been provided with a key before allowing
 * any data operation on a "nokey" child socket.  On the first successful
 * check the child takes its own reference on the parent so subsequent
 * calls short-circuit via ask->refcnt.
 *
 * Returns 0 when a key is present, -ENOKEY otherwise.
 */
static int skcipher_check_key(struct socket *sock)
{
        int err = 0;
        struct sock *psk;
        struct alg_sock *pask;
        struct crypto_skcipher *tfm;
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);

        lock_sock(sk);
        /* Already validated once - nothing to do. */
        if (ask->refcnt)
                goto unlock_child;

        psk = ask->parent;
        pask = alg_sk(ask->parent);
        tfm = pask->private;

        err = -ENOKEY;
        /* Child lock is held; nest the parent lock to avoid lockdep splat. */
        lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
        if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                goto unlock;

        /* First child to pass the check pins the parent socket. */
        if (!pask->refcnt++)
                sock_hold(psk);

        /*
         * Transfer the reference taken in af_alg_accept() to the refcnt
         * scheme: mark this child as counted and drop the extra hold.
         */
        ask->refcnt = 1;
        sock_put(psk);

        err = 0;

unlock:
        release_sock(psk);
unlock_child:
        release_sock(sk);

        return err;
}
245
246static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
247 size_t size)
248{
249 int err;
250
251 err = skcipher_check_key(sock);
252 if (err)
253 return err;
254
255 return skcipher_sendmsg(sock, msg, size);
256}
257
258static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
259 int offset, size_t size, int flags)
260{
261 int err;
262
263 err = skcipher_check_key(sock);
264 if (err)
265 return err;
266
Stephan Mueller2d975912017-08-02 07:56:19 +0200267 return af_alg_sendpage(sock, page, offset, size, flags);
Herbert Xua0fa2d02016-01-04 13:36:12 +0900268}
269
270static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
271 size_t ignored, int flags)
272{
273 int err;
274
275 err = skcipher_check_key(sock);
276 if (err)
277 return err;
278
279 return skcipher_recvmsg(sock, msg, ignored, flags);
280}
281
/*
 * Socket operations installed while the parent tfm is still keyless.
 * The data-path entries go through the *_nokey wrappers, which call
 * skcipher_check_key() and fail with -ENOKEY until a key is set.
 */
static struct proto_ops algif_skcipher_ops_nokey = {
        .family         =       PF_ALG,

        .connect        =       sock_no_connect,
        .socketpair     =       sock_no_socketpair,
        .getname        =       sock_no_getname,
        .ioctl          =       sock_no_ioctl,
        .listen         =       sock_no_listen,
        .shutdown       =       sock_no_shutdown,
        .getsockopt     =       sock_no_getsockopt,
        .mmap           =       sock_no_mmap,
        .bind           =       sock_no_bind,
        .accept         =       sock_no_accept,
        .setsockopt     =       sock_no_setsockopt,

        .release        =       af_alg_release,
        .sendmsg        =       skcipher_sendmsg_nokey,
        .sendpage       =       skcipher_sendpage_nokey,
        .recvmsg        =       skcipher_recvmsg_nokey,
        .poll           =       af_alg_poll,
};
303
Herbert Xu8ff59092010-10-19 21:31:55 +0800304static void *skcipher_bind(const char *name, u32 type, u32 mask)
305{
Eric Biggersf8d33fa2018-01-03 11:16:29 -0800306 return crypto_alloc_skcipher(name, type, mask);
Herbert Xu8ff59092010-10-19 21:31:55 +0800307}
308
/* af_alg release hook: free the transform allocated by skcipher_bind(). */
static void skcipher_release(void *private)
{
        struct crypto_skcipher *tfm = private;

        crypto_free_skcipher(tfm);
}
313
314static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
315{
Eric Biggersf8d33fa2018-01-03 11:16:29 -0800316 return crypto_skcipher_setkey(private, key, keylen);
Herbert Xu8ff59092010-10-19 21:31:55 +0800317}
318
319static void skcipher_sock_destruct(struct sock *sk)
320{
321 struct alg_sock *ask = alg_sk(sk);
Stephan Mueller2d975912017-08-02 07:56:19 +0200322 struct af_alg_ctx *ctx = ask->private;
Stephan Muellere8704562017-06-25 17:12:39 +0200323 struct sock *psk = ask->parent;
324 struct alg_sock *pask = alg_sk(psk);
Eric Biggersf8d33fa2018-01-03 11:16:29 -0800325 struct crypto_skcipher *tfm = pask->private;
Herbert Xu8ff59092010-10-19 21:31:55 +0800326
Stephan Mueller2d975912017-08-02 07:56:19 +0200327 af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
Herbert Xu0d96e4b2015-12-18 19:16:57 +0800328 sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
Herbert Xu8ff59092010-10-19 21:31:55 +0800329 sock_kfree_s(sk, ctx, ctx->len);
330 af_alg_release_parent(sk);
331}
332
Herbert Xud7b65ae2016-01-13 15:01:06 +0800333static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
Herbert Xu8ff59092010-10-19 21:31:55 +0800334{
Stephan Mueller2d975912017-08-02 07:56:19 +0200335 struct af_alg_ctx *ctx;
Herbert Xu8ff59092010-10-19 21:31:55 +0800336 struct alg_sock *ask = alg_sk(sk);
Eric Biggersf8d33fa2018-01-03 11:16:29 -0800337 struct crypto_skcipher *tfm = private;
Stephan Muellere8704562017-06-25 17:12:39 +0200338 unsigned int len = sizeof(*ctx);
Herbert Xu8ff59092010-10-19 21:31:55 +0800339
340 ctx = sock_kmalloc(sk, len, GFP_KERNEL);
341 if (!ctx)
342 return -ENOMEM;
343
Eric Biggersf8d33fa2018-01-03 11:16:29 -0800344 ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
Herbert Xu8ff59092010-10-19 21:31:55 +0800345 GFP_KERNEL);
346 if (!ctx->iv) {
347 sock_kfree_s(sk, ctx, len);
348 return -ENOMEM;
349 }
350
Eric Biggersf8d33fa2018-01-03 11:16:29 -0800351 memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));
Herbert Xu8ff59092010-10-19 21:31:55 +0800352
Stephan Muellere8704562017-06-25 17:12:39 +0200353 INIT_LIST_HEAD(&ctx->tsgl_list);
Herbert Xu8ff59092010-10-19 21:31:55 +0800354 ctx->len = len;
355 ctx->used = 0;
Jonathan Cameronaf955bf2017-12-19 10:27:24 +0000356 atomic_set(&ctx->rcvused, 0);
Herbert Xu8ff59092010-10-19 21:31:55 +0800357 ctx->more = 0;
358 ctx->merge = 0;
359 ctx->enc = 0;
Gilad Ben-Yossef2c3f8b12017-10-18 08:00:39 +0100360 crypto_init_wait(&ctx->wait);
Herbert Xu8ff59092010-10-19 21:31:55 +0800361
362 ask->private = ctx;
363
Herbert Xu8ff59092010-10-19 21:31:55 +0800364 sk->sk_destruct = skcipher_sock_destruct;
365
366 return 0;
367}
368
Herbert Xua0fa2d02016-01-04 13:36:12 +0900369static int skcipher_accept_parent(void *private, struct sock *sk)
370{
Eric Biggersf8d33fa2018-01-03 11:16:29 -0800371 struct crypto_skcipher *tfm = private;
Herbert Xua0fa2d02016-01-04 13:36:12 +0900372
Eric Biggersf8d33fa2018-01-03 11:16:29 -0800373 if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
Herbert Xua0fa2d02016-01-04 13:36:12 +0900374 return -ENOKEY;
375
Herbert Xud7b65ae2016-01-13 15:01:06 +0800376 return skcipher_accept_parent_nokey(private, sk);
Herbert Xua0fa2d02016-01-04 13:36:12 +0900377}
378
/*
 * Registration record for the "skcipher" algorithm type: wires the
 * bind/release/setkey/accept hooks and both proto_ops tables into the
 * AF_ALG framework.
 */
static const struct af_alg_type algif_type_skcipher = {
        .bind           =       skcipher_bind,
        .release        =       skcipher_release,
        .setkey         =       skcipher_setkey,
        .accept         =       skcipher_accept_parent,
        .accept_nokey   =       skcipher_accept_parent_nokey,
        .ops            =       &algif_skcipher_ops,
        .ops_nokey      =       &algif_skcipher_ops_nokey,
        .name           =       "skcipher",
        .owner          =       THIS_MODULE
};
390
/* Register the "skcipher" type with the AF_ALG family on module load. */
static int __init algif_skcipher_init(void)
{
        return af_alg_register_type(&algif_type_skcipher);
}
395
/* Unregister on module unload; failure here indicates a framework bug. */
static void __exit algif_skcipher_exit(void)
{
        int err = af_alg_unregister_type(&algif_type_skcipher);
        BUG_ON(err);
}
401
402module_init(algif_skcipher_init);
403module_exit(algif_skcipher_exit);
404MODULE_LICENSE("GPL");