/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * The following concept of the memory management is used:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendpage/sendmsg. Filling
 * up the TX SGL does not cause a crypto operation -- the data will only be
 * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
 * provide a buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request is released. The extracted TX SGL parts are released together with
 * the RX SGL release.
 */
29
30#include <crypto/scatterwalk.h>
31#include <crypto/skcipher.h>
32#include <crypto/if_alg.h>
33#include <linux/init.h>
34#include <linux/list.h>
35#include <linux/kernel.h>
Ingo Molnar174cd4b2017-02-02 19:15:33 +010036#include <linux/sched/signal.h>
Herbert Xu8ff59092010-10-19 21:31:55 +080037#include <linux/mm.h>
38#include <linux/module.h>
39#include <linux/net.h>
40#include <net/sock.h>
41
Stephan Muellere8704562017-06-25 17:12:39 +020042struct skcipher_tsgl {
Herbert Xu8ff59092010-10-19 21:31:55 +080043 struct list_head list;
Herbert Xu8ff59092010-10-19 21:31:55 +080044 int cur;
Herbert Xu8ff59092010-10-19 21:31:55 +080045 struct scatterlist sg[0];
46};
47
/*
 * One RX SGL segment: wraps one user-space iovec pinned via af_alg_make_sg().
 * Segments are chained on skcipher_async_req::rsgl_list.
 */
struct skcipher_rsgl {
	struct af_alg_sgl sgl;		/* pinned user pages of one iovec */
	struct list_head list;
	size_t sg_num_bytes;		/* bytes covered; subtracted from
					 * ctx->rcvused on release */
};
53
/*
 * Per-recvmsg state: RX SGL list, the extracted per-request TX SGL and the
 * embedded skcipher_request (variable-size tail, hence @areqlen).
 */
struct skcipher_async_req {
	struct kiocb *iocb;		/* only set for AIO operation */
	struct sock *sk;

	struct skcipher_rsgl first_sgl;	/* inline first RX segment, avoids
					 * one allocation */
	struct list_head rsgl_list;	/* list of all skcipher_rsgl */

	struct scatterlist *tsgl;	/* TX SG entries for this request */
	unsigned int tsgl_entries;

	unsigned int areqlen;		/* total allocation size incl. req */
	struct skcipher_request req;	/* must be last: tfm reqsize tail */
};
67
/* Parent (tfm) socket private data: the transform plus key state. */
struct skcipher_tfm {
	struct crypto_skcipher *skcipher;
	bool has_key;		/* true after a successful setkey() */
};
72
/* Per-request-socket context, protected by the socket lock. */
struct skcipher_ctx {
	struct list_head tsgl_list;	/* TX SGL chunks (skcipher_tsgl) */

	void *iv;			/* IV buffer of ivsize bytes */

	struct af_alg_completion completion;	/* for synchronous ops */

	size_t used;			/* TX bytes queued, not yet consumed */
	size_t rcvused;			/* RX bytes currently pinned */

	bool more;			/* MSG_MORE seen: expect more TX data */
	bool merge;			/* last TX page has room to append */
	bool enc;			/* true = encrypt, false = decrypt */

	unsigned int len;		/* size of this allocation */
};
89
/*
 * Data sg entries per TX SGL chunk: fit the chunk into 4096 bytes and
 * reserve one extra scatterlist slot for chaining to the next chunk.
 */
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_tsgl)) / \
		      sizeof(struct scatterlist) - 1)
92
Herbert Xu0f6bb832010-11-30 16:49:02 +080093static inline int skcipher_sndbuf(struct sock *sk)
Herbert Xu8ff59092010-10-19 21:31:55 +080094{
95 struct alg_sock *ask = alg_sk(sk);
96 struct skcipher_ctx *ctx = ask->private;
97
Herbert Xu0f6bb832010-11-30 16:49:02 +080098 return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
99 ctx->used, 0);
100}
101
102static inline bool skcipher_writable(struct sock *sk)
103{
104 return PAGE_SIZE <= skcipher_sndbuf(sk);
Herbert Xu8ff59092010-10-19 21:31:55 +0800105}
106
Stephan Muellere8704562017-06-25 17:12:39 +0200107static inline int skcipher_rcvbuf(struct sock *sk)
Herbert Xu8ff59092010-10-19 21:31:55 +0800108{
109 struct alg_sock *ask = alg_sk(sk);
110 struct skcipher_ctx *ctx = ask->private;
Stephan Muellere8704562017-06-25 17:12:39 +0200111
112 return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
113 ctx->rcvused, 0);
114}
115
116static inline bool skcipher_readable(struct sock *sk)
117{
118 return PAGE_SIZE <= skcipher_rcvbuf(sk);
119}
120
/*
 * Ensure the last TX SGL chunk has a free sg slot; allocate and chain a
 * new chunk if the list is empty or the tail chunk is full.
 * Called with the socket lock held.  Returns 0 or -ENOMEM.
 */
static int skcipher_alloc_tsgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tsgl *sgl;
	struct scatterlist *sg = NULL;

	/* Peek at the tail chunk; only valid if the list is non-empty. */
	sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, list);
	if (!list_empty(&ctx->tsgl_list))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		/* Link the old chunk's spare slot to the new chunk. */
		if (sg)
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl_list);
	}

	return 0;
}
150
/*
 * Count how many TX SG entries are needed to cover @bytes of queued data.
 * The entry that satisfies the remainder is included in the count.
 * Called with the socket lock held.
 */
static unsigned int skcipher_count_tsgl(struct sock *sk, size_t bytes)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tsgl *sgl, *tmp;
	unsigned int i;
	unsigned int sgl_count = 0;

	if (!bytes)
		return 0;

	/* NOTE(review): no entries are deleted here, so the _safe
	 * iterator is not strictly required. */
	list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
		struct scatterlist *sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			sgl_count++;
			if (sg[i].length >= bytes)
				return sgl_count;

			bytes -= sg[i].length;
		}
	}

	return sgl_count;
}
176
/*
 * Consume @used bytes from the front of the TX SGL.  If @dst is non-NULL,
 * the consumed pages are handed over to @dst (page references move with
 * them); otherwise the pages are released here.  Fully drained chunks are
 * freed.  Called with the socket lock held.
 */
static void skcipher_pull_tsgl(struct sock *sk, size_t used,
			       struct scatterlist *dst)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tsgl *sgl;
	struct scatterlist *sg;
	unsigned int i;

	while (!list_empty(&ctx->tsgl_list)) {
		sgl = list_first_entry(&ctx->tsgl_list, struct skcipher_tsgl,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);
			struct page *page = sg_page(sg + i);

			/* Slot already consumed on an earlier pass. */
			if (!page)
				continue;

			/*
			 * Assumption: caller created skcipher_count_tsgl(len)
			 * SG entries in dst.
			 */
			if (dst)
				sg_set_page(dst + i, page, plen, sg[i].offset);

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			/* Entry only partially consumed: stop here. */
			if (sg[i].length)
				return;

			/* Page reference either moved to dst or dropped. */
			if (!dst)
				put_page(page);
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
					      (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}
227
/*
 * Release all SGLs attached to an async request: unpin the RX user pages
 * (crediting ctx->rcvused), drop the page references held by the extracted
 * per-request TX SGL, and free both SGL allocations.
 * Called with the socket lock held.
 */
static void skcipher_free_areq_sgls(struct skcipher_async_req *areq)
{
	struct sock *sk = areq->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_rsgl *rsgl, *tmp;
	struct scatterlist *tsgl;
	struct scatterlist *sg;
	unsigned int i;

	list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
		ctx->rcvused -= rsgl->sg_num_bytes;
		af_alg_free_sg(&rsgl->sgl);
		list_del(&rsgl->list);
		/* first_sgl is embedded in areq, not separately allocated */
		if (rsgl != &areq->first_sgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	/* tsgl_entries == 0 when allocation never happened: loop is a no-op */
	tsgl = areq->tsgl;
	for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
		if (!sg_page(sg))
			continue;
		put_page(sg_page(sg));
	}

	if (areq->tsgl && areq->tsgl_entries)
		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
}
256
/*
 * Sleep until send buffer space becomes available (skcipher_writable()).
 * Returns 0 on success, -EAGAIN for MSG_DONTWAIT, -ERESTARTSYS on signal.
 */
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = -ERESTARTSYS;
	long timeout;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
282
/*
 * Wake sleepers once send buffer space has been freed up.
 * NOTE(review): the mask is POLLIN/POLLRDNORM/POLLRDBAND and the async
 * notification is SOCK_WAKE_WAITD/POLL_IN even though this is the wmem
 * path -- mirrors the historical af_alg behavior; confirm intended.
 */
static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
299
/*
 * Sleep until TX data is queued (ctx->used != 0).
 * Returns 0 on success, -EAGAIN for MSG_DONTWAIT, -ERESTARTSYS on signal.
 */
static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT) {
		return -EAGAIN;
	}

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used, &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}
330
/*
 * Wake sleepers once TX data became available for processing.
 * NOTE(review): mask mixes POLLOUT with POLLRDNORM/POLLRDBAND -- mirrors
 * the historical af_alg behavior; confirm intended.
 */
static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
349
/*
 * sendmsg() handler: parse the optional ALG_SET_OP/ALG_SET_IV control
 * message, then copy the payload into the TX SGL.  Data is only queued
 * here; the crypto operation runs from recvmsg.
 *
 * Returns the number of bytes copied, or a negative error if nothing
 * was copied.
 */
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned ivsize = crypto_skcipher_ivsize(tfm);
	struct skcipher_tsgl *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		/* IV length must match the cipher's IV size exactly. */
		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	/* Previous request not fully consumed yet: refuse new data. */
	if (!ctx->more && ctx->used)
		goto unlock;

	/* Apply op/IV only under the lock, after the state check. */
	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		size_t plen;

		/* Append into the partially filled last page if possible. */
		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl_list.prev,
					 struct skcipher_tsgl, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			/* merge stays set while the page has room left */
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		/* Cap this round at the available send buffer space. */
		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_tsgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl,
				 list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);
		/* Fill fresh pages, one sg entry per page. */
		do {
			i = sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		/* Last page partially filled: next sendmsg may append. */
		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}
488
/*
 * sendpage() handler: zero-copy variant of sendmsg.  Takes a reference on
 * @page and appends it as one TX SGL entry instead of copying the data.
 *
 * NOTE(review): with size == 0 the done path returns err ?: size with
 * err still -EINVAL -- confirm this is the intended result for an empty
 * sendpage.
 */
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tsgl *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	/* Previous request not fully consumed yet: refuse new data. */
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_tsgl(sk);
	if (err)
		goto unlock;

	/* A foreign page cannot be appended to; disable merging. */
	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}
539
/*
 * Crypto completion callback for AIO requests: release the request's SGLs
 * and the request itself, drop the socket reference taken at submission,
 * and complete the user iocb with the processed byte count (or error).
 */
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct skcipher_async_req *areq = req->data;
	struct sock *sk = areq->sk;
	struct kiocb *iocb = areq->iocb;
	unsigned int resultlen;

	lock_sock(sk);

	/* Buffer size written by crypto operation. */
	resultlen = areq->req.cryptlen;

	skcipher_free_areq_sgls(areq);
	sock_kfree_s(sk, areq, areq->areqlen);
	__sock_put(sk);	/* pairs with sock_hold() in _skcipher_recvmsg */

	iocb->ki_complete(iocb, err ? err : resultlen, 0);

	release_sock(sk);
}
560
/*
 * Core of recvmsg: build the RX SGL from the caller's iovecs, extract the
 * matching amount of TX data into a per-request SGL, and run the cipher
 * either synchronously or as AIO.
 *
 * Called with the socket lock held.  Returns the number of bytes
 * processed, -EIOCBQUEUED for in-flight AIO, or a negative error.
 */
static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned int bs = crypto_skcipher_blocksize(tfm);
	unsigned int areqlen = sizeof(struct skcipher_async_req) +
			       crypto_skcipher_reqsize(tfm);
	struct skcipher_async_req *areq;
	struct skcipher_rsgl *last_rsgl = NULL;
	int err = 0;
	size_t len = 0;

	/* Allocate cipher request for current operation. */
	areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
	if (unlikely(!areq))
		return -ENOMEM;
	areq->areqlen = areqlen;
	areq->sk = sk;
	INIT_LIST_HEAD(&areq->rsgl_list);
	areq->tsgl = NULL;
	areq->tsgl_entries = 0;

	/* convert iovecs of output buffers into RX SGL */
	while (msg_data_left(msg)) {
		struct skcipher_rsgl *rsgl;
		size_t seglen;

		/* limit the amount of readable buffers */
		if (!skcipher_readable(sk))
			break;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto free;
		}

		seglen = min_t(size_t, ctx->used, msg_data_left(msg));

		/* First segment lives inline in areq; extras are allocated. */
		if (list_empty(&areq->rsgl_list)) {
			rsgl = &areq->first_sgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (!rsgl) {
				err = -ENOMEM;
				goto free;
			}
		}

		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &areq->rsgl_list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto free;

		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		len += err;
		ctx->rcvused += err;
		rsgl->sg_num_bytes = err;
		iov_iter_advance(&msg->msg_iter, err);
	}

	/* Process only as much RX buffers for which we have TX data */
	if (len > ctx->used)
		len = ctx->used;

	/*
	 * If more buffers are to be expected to be processed, process only
	 * full block size buffers.
	 */
	if (ctx->more || len < ctx->used)
		len -= len % bs;

	/*
	 * Create a per request TX SGL for this request which tracks the
	 * SG entries from the global TX SGL.
	 */
	areq->tsgl_entries = skcipher_count_tsgl(sk, len);
	if (!areq->tsgl_entries)
		areq->tsgl_entries = 1;
	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
				  GFP_KERNEL);
	if (!areq->tsgl) {
		err = -ENOMEM;
		goto free;
	}
	sg_init_table(areq->tsgl, areq->tsgl_entries);
	skcipher_pull_tsgl(sk, len, areq->tsgl);

	/* Initialize the crypto operation */
	skcipher_request_set_tfm(&areq->req, tfm);
	skcipher_request_set_crypt(&areq->req, areq->tsgl,
				   areq->first_sgl.sgl.sg, len, ctx->iv);

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		areq->iocb = msg->msg_iocb;
		skcipher_request_set_callback(&areq->req,
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      skcipher_async_cb, areq);
		err = ctx->enc ? crypto_skcipher_encrypt(&areq->req) :
				 crypto_skcipher_decrypt(&areq->req);
	} else {
		/* Synchronous operation */
		skcipher_request_set_callback(&areq->req,
					      CRYPTO_TFM_REQ_MAY_SLEEP |
					      CRYPTO_TFM_REQ_MAY_BACKLOG,
					      af_alg_complete,
					      &ctx->completion);
		err = af_alg_wait_for_completion(ctx->enc ?
					crypto_skcipher_encrypt(&areq->req) :
					crypto_skcipher_decrypt(&areq->req),
						 &ctx->completion);
	}

	/* AIO operation in progress */
	if (err == -EINPROGRESS) {
		/* Keep the socket alive until skcipher_async_cb runs. */
		sock_hold(sk);
		return -EIOCBQUEUED;
	}

free:
	skcipher_free_areq_sgls(areq);
	/* NOTE(review): areq is always non-NULL here (early return on
	 * allocation failure above); this check is redundant. */
	if (areq)
		sock_kfree_s(sk, areq, areqlen);

	return err ? err : len;
}
701
/*
 * recvmsg() handler: loop _skcipher_recvmsg() until the caller's buffers
 * are filled, accumulating the processed byte count.
 */
static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int err = _skcipher_recvmsg(sock, msg, ignored, flags);

		/*
		 * This error covers -EIOCBQUEUED which implies that we can
		 * only handle one AIO request. If the caller wants to have
		 * multiple AIO requests in parallel, he must make multiple
		 * separate AIO calls.
		 */
		if (err <= 0) {
			if (err == -EIOCBQUEUED)
				ret = err;
			goto out;
		}

		ret += err;
	}

out:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	return ret;
}
Herbert Xu8ff59092010-10-19 21:31:55 +0800732
/*
 * poll() handler: readable when TX data is queued for processing,
 * writable when at least a page of send buffer space is free.
 */
static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
752
/* proto_ops used once a key has been set on the parent tfm socket. */
static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	skcipher_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	skcipher_poll,
};
774
/*
 * Verify a key has been set on the parent tfm socket before allowing I/O
 * on a nokey request socket.  On first success, transfer the parent
 * reference accounting to this child (ask->refcnt = 1) so subsequent
 * calls short-circuit.  Takes child then parent lock (nested).
 */
static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct skcipher_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	/* Already validated once for this socket. */
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	/* First child to validate pins the parent socket. */
	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}
812
813static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
814 size_t size)
815{
816 int err;
817
818 err = skcipher_check_key(sock);
819 if (err)
820 return err;
821
822 return skcipher_sendmsg(sock, msg, size);
823}
824
825static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
826 int offset, size_t size, int flags)
827{
828 int err;
829
830 err = skcipher_check_key(sock);
831 if (err)
832 return err;
833
834 return skcipher_sendpage(sock, page, offset, size, flags);
835}
836
837static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
838 size_t ignored, int flags)
839{
840 int err;
841
842 err = skcipher_check_key(sock);
843 if (err)
844 return err;
845
846 return skcipher_recvmsg(sock, msg, ignored, flags);
847}
848
/*
 * proto_ops installed before a key is set: each I/O entry point goes
 * through skcipher_check_key() first.
 */
static struct proto_ops algif_skcipher_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg_nokey,
	.sendpage	=	skcipher_sendpage_nokey,
	.recvmsg	=	skcipher_recvmsg_nokey,
	.poll		=	skcipher_poll,
};
870
Herbert Xu8ff59092010-10-19 21:31:55 +0800871static void *skcipher_bind(const char *name, u32 type, u32 mask)
872{
Herbert Xudd504582015-12-25 15:40:05 +0800873 struct skcipher_tfm *tfm;
874 struct crypto_skcipher *skcipher;
875
876 tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
877 if (!tfm)
878 return ERR_PTR(-ENOMEM);
879
880 skcipher = crypto_alloc_skcipher(name, type, mask);
881 if (IS_ERR(skcipher)) {
882 kfree(tfm);
883 return ERR_CAST(skcipher);
884 }
885
886 tfm->skcipher = skcipher;
887
888 return tfm;
Herbert Xu8ff59092010-10-19 21:31:55 +0800889}
890
891static void skcipher_release(void *private)
892{
Herbert Xudd504582015-12-25 15:40:05 +0800893 struct skcipher_tfm *tfm = private;
894
895 crypto_free_skcipher(tfm->skcipher);
896 kfree(tfm);
Herbert Xu8ff59092010-10-19 21:31:55 +0800897}
898
899static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
900{
Herbert Xudd504582015-12-25 15:40:05 +0800901 struct skcipher_tfm *tfm = private;
902 int err;
903
904 err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
905 tfm->has_key = !err;
906
907 return err;
Herbert Xu8ff59092010-10-19 21:31:55 +0800908}
909
/*
 * Socket destructor: drop all queued TX pages, zeroize and free the IV,
 * free the context and release the parent tfm socket reference.
 */
static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;

	skcipher_pull_tsgl(sk, ctx->used, NULL);
	/* kzfree: IV may hold key-derived material, wipe before freeing */
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}
924
/*
 * Set up a request socket: allocate the per-socket context and a
 * zeroed IV buffer, initialize all state, and install the destructor.
 * Returns 0 or -ENOMEM.
 */
static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_tfm *tfm = private;
	struct crypto_skcipher *skcipher = tfm->skcipher;
	unsigned int len = sizeof(*ctx);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	/* IV defaults to all zeroes until ALG_SET_IV is received. */
	memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));

	INIT_LIST_HEAD(&ctx->tsgl_list);
	ctx->len = len;
	ctx->used = 0;
	ctx->rcvused = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}
961
Herbert Xua0fa2d02016-01-04 13:36:12 +0900962static int skcipher_accept_parent(void *private, struct sock *sk)
963{
964 struct skcipher_tfm *tfm = private;
965
Herbert Xu6e8d8ec2016-01-11 21:29:41 +0800966 if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
Herbert Xua0fa2d02016-01-04 13:36:12 +0900967 return -ENOKEY;
968
Herbert Xud7b65ae2016-01-13 15:01:06 +0800969 return skcipher_accept_parent_nokey(private, sk);
Herbert Xua0fa2d02016-01-04 13:36:12 +0900970}
971
/* AF_ALG type registration for "skcipher" sockets. */
static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.accept_nokey	=	skcipher_accept_parent_nokey,
	.ops		=	&algif_skcipher_ops,
	.ops_nokey	=	&algif_skcipher_ops_nokey,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};
983
/* Module init: register the "skcipher" AF_ALG type. */
static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}
988
/*
 * Module exit: unregister the type.  Failure here would mean the type
 * table is corrupted, hence the BUG_ON.
 */
static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);
	BUG_ON(err);
}
994
995module_init(algif_skcipher_init);
996module_exit(algif_skcipher_exit);
997MODULE_LICENSE("GPL");