Dave Watson3c4d7552017-06-14 11:37:39 -07001/*
2 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
5 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
6 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
John Fastabendd3b18ad32018-10-13 02:46:01 +02007 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
Dave Watson3c4d7552017-06-14 11:37:39 -07008 *
9 * This software is available to you under a choice of one of two
10 * licenses. You may choose to be licensed under the terms of the GNU
11 * General Public License (GPL) Version 2, available from the file
12 * COPYING in the main directory of this source tree, or the
13 * OpenIB.org BSD license below:
14 *
15 * Redistribution and use in source and binary forms, with or
16 * without modification, are permitted provided that the following
17 * conditions are met:
18 *
19 * - Redistributions of source code must retain the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer.
22 *
23 * - Redistributions in binary form must reproduce the above
24 * copyright notice, this list of conditions and the following
25 * disclaimer in the documentation and/or other materials
26 * provided with the distribution.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 * SOFTWARE.
36 */
37
Dave Watsonc46234e2018-03-22 10:10:35 -070038#include <linux/sched/signal.h>
Dave Watson3c4d7552017-06-14 11:37:39 -070039#include <linux/module.h>
40#include <crypto/aead.h>
41
Dave Watsonc46234e2018-03-22 10:10:35 -070042#include <net/strparser.h>
Dave Watson3c4d7552017-06-14 11:37:39 -070043#include <net/tls.h>
44
Kees Cookb16520f2018-04-10 17:52:34 -070045#define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE
46
Doron Roberts-Kedes0927f712018-08-28 16:33:57 -070047static int __skb_nsg(struct sk_buff *skb, int offset, int len,
48 unsigned int recursion_level)
49{
50 int start = skb_headlen(skb);
51 int i, chunk = start - offset;
52 struct sk_buff *frag_iter;
53 int elt = 0;
54
55 if (unlikely(recursion_level >= 24))
56 return -EMSGSIZE;
57
58 if (chunk > 0) {
59 if (chunk > len)
60 chunk = len;
61 elt++;
62 len -= chunk;
63 if (len == 0)
64 return elt;
65 offset += chunk;
66 }
67
68 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
69 int end;
70
71 WARN_ON(start > offset + len);
72
73 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
74 chunk = end - offset;
75 if (chunk > 0) {
76 if (chunk > len)
77 chunk = len;
78 elt++;
79 len -= chunk;
80 if (len == 0)
81 return elt;
82 offset += chunk;
83 }
84 start = end;
85 }
86
87 if (unlikely(skb_has_frag_list(skb))) {
88 skb_walk_frags(skb, frag_iter) {
89 int end, ret;
90
91 WARN_ON(start > offset + len);
92
93 end = start + frag_iter->len;
94 chunk = end - offset;
95 if (chunk > 0) {
96 if (chunk > len)
97 chunk = len;
98 ret = __skb_nsg(frag_iter, offset - start, chunk,
99 recursion_level + 1);
100 if (unlikely(ret < 0))
101 return ret;
102 elt += ret;
103 len -= chunk;
104 if (len == 0)
105 return elt;
106 offset += chunk;
107 }
108 start = end;
109 }
110 }
111 BUG_ON(len);
112 return elt;
113}
114
115/* Return the number of scatterlist elements required to completely map the
116 * skb, or -EMSGSIZE if the recursion depth is exceeded.
117 */
118static int skb_nsg(struct sk_buff *skb, int offset, int len)
119{
120 return __skb_nsg(skb, offset, len, 0);
121}
122
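/* TLS 1.3 appends the real content type plus optional zero padding to the
 * plaintext. Walk backwards from just before the authentication tag,
 * skipping zero bytes until the content type is found; remember it in
 * ctx->control and return the number of padding bytes to trim.
 */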
Dave Watson130b3922019-01-30 21:58:31 +0000123static int padding_length(struct tls_sw_context_rx *ctx,
124 struct tls_context *tls_ctx, struct sk_buff *skb)
125{
126 struct strp_msg *rxm = strp_msg(skb);
127 int sub = 0;
128
129 /* Determine zero-padding length */
130 if (tls_ctx->crypto_recv.info.version == TLS_1_3_VERSION) {
131 char content_type = 0;
132 int err;
133 int back = 17;
134
135 while (content_type == 0) {
136 if (back > rxm->full_len)
137 return -EBADMSG;
138 err = skb_copy_bits(skb,
139 rxm->offset + rxm->full_len - back,
140 &content_type, 1);
141 if (content_type)
142 break;
143 sub++;
144 back++;
145 }
146 ctx->control = content_type;
147 }
148 return sub;
149}
150
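/* Completion callback for asynchronous decryption. On success, trim the
 * TLS 1.3 padding, record header and authentication tag from the record
 * boundaries, release any destination pages used for out-of-place
 * decryption, and wake a waiter once all pending requests have finished.
 */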
Vakul Garg94524d82018-08-29 15:26:55 +0530151static void tls_decrypt_done(struct crypto_async_request *req, int err)
152{
153 struct aead_request *aead_req = (struct aead_request *)req;
Vakul Garg94524d82018-08-29 15:26:55 +0530154 struct scatterlist *sgout = aead_req->dst;
Vakul Garg692d7b52019-01-16 10:40:16 +0000155 struct scatterlist *sgin = aead_req->src;
John Fastabend7a3dd8c2018-09-14 13:01:46 -0700156 struct tls_sw_context_rx *ctx;
157 struct tls_context *tls_ctx;
Vakul Garg94524d82018-08-29 15:26:55 +0530158 struct scatterlist *sg;
John Fastabend7a3dd8c2018-09-14 13:01:46 -0700159 struct sk_buff *skb;
Vakul Garg94524d82018-08-29 15:26:55 +0530160 unsigned int pages;
John Fastabend7a3dd8c2018-09-14 13:01:46 -0700161 int pending;
162
163 skb = (struct sk_buff *)req->data;
164 tls_ctx = tls_get_ctx(skb->sk);
165 ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg94524d82018-08-29 15:26:55 +0530166
 167 /* Propagate the error if there was one */
168 if (err) {
169 ctx->async_wait.err = err;
John Fastabend7a3dd8c2018-09-14 13:01:46 -0700170 tls_err_abort(skb->sk, err);
Vakul Garg692d7b52019-01-16 10:40:16 +0000171 } else {
172 struct strp_msg *rxm = strp_msg(skb);
Dave Watson130b3922019-01-30 21:58:31 +0000173 rxm->full_len -= padding_length(ctx, tls_ctx, skb);
Vakul Garg692d7b52019-01-16 10:40:16 +0000174 rxm->offset += tls_ctx->rx.prepend_size;
175 rxm->full_len -= tls_ctx->rx.overhead_size;
Vakul Garg94524d82018-08-29 15:26:55 +0530176 }
177
John Fastabend7a3dd8c2018-09-14 13:01:46 -0700178 /* After using skb->sk to propagate sk through crypto async callback
179 * we need to NULL it again.
180 */
181 skb->sk = NULL;
182
Vakul Garg94524d82018-08-29 15:26:55 +0530183
Vakul Garg692d7b52019-01-16 10:40:16 +0000184 /* Free the destination pages if the skb was not decrypted in place */
185 if (sgout != sgin) {
186 /* Skip the first S/G entry as it points to AAD */
187 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
188 if (!sg)
189 break;
190 put_page(sg_page(sg));
191 }
Vakul Garg94524d82018-08-29 15:26:55 +0530192 }
193
194 kfree(aead_req);
195
Vakul Garg692d7b52019-01-16 10:40:16 +0000196 pending = atomic_dec_return(&ctx->decrypt_pending);
197
Vakul Garg94524d82018-08-29 15:26:55 +0530198 if (!pending && READ_ONCE(ctx->async_notify))
199 complete(&ctx->async_wait.completion);
200}
201
Dave Watsonc46234e2018-03-22 10:10:35 -0700202static int tls_do_decryption(struct sock *sk,
Vakul Garg94524d82018-08-29 15:26:55 +0530203 struct sk_buff *skb,
Dave Watsonc46234e2018-03-22 10:10:35 -0700204 struct scatterlist *sgin,
205 struct scatterlist *sgout,
206 char *iv_recv,
207 size_t data_len,
Vakul Garg94524d82018-08-29 15:26:55 +0530208 struct aead_request *aead_req,
209 bool async)
Dave Watsonc46234e2018-03-22 10:10:35 -0700210{
211 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300212 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -0700213 int ret;
Dave Watsonc46234e2018-03-22 10:10:35 -0700214
Vakul Garg0b243d02018-08-10 20:46:41 +0530215 aead_request_set_tfm(aead_req, ctx->aead_recv);
Dave Watsona2ef9b62019-01-30 21:58:12 +0000216 aead_request_set_ad(aead_req, tls_ctx->rx.aad_size);
Dave Watsonc46234e2018-03-22 10:10:35 -0700217 aead_request_set_crypt(aead_req, sgin, sgout,
218 data_len + tls_ctx->rx.tag_size,
219 (u8 *)iv_recv);
Dave Watsonc46234e2018-03-22 10:10:35 -0700220
Vakul Garg94524d82018-08-29 15:26:55 +0530221 if (async) {
John Fastabend7a3dd8c2018-09-14 13:01:46 -0700222 /* Using skb->sk to push sk through to crypto async callback
223 * handler. This allows propagating errors up to the socket
224 * if needed. It _must_ be cleared in the async handler
225 * before kfree_skb is called. We _know_ skb->sk is NULL
226 * because it is a clone from strparser.
227 */
228 skb->sk = sk;
Vakul Garg94524d82018-08-29 15:26:55 +0530229 aead_request_set_callback(aead_req,
230 CRYPTO_TFM_REQ_MAY_BACKLOG,
231 tls_decrypt_done, skb);
232 atomic_inc(&ctx->decrypt_pending);
233 } else {
234 aead_request_set_callback(aead_req,
235 CRYPTO_TFM_REQ_MAY_BACKLOG,
236 crypto_req_done, &ctx->async_wait);
237 }
238
239 ret = crypto_aead_decrypt(aead_req);
240 if (ret == -EINPROGRESS) {
241 if (async)
242 return ret;
243
244 ret = crypto_wait_req(ret, &ctx->async_wait);
245 }
246
247 if (async)
248 atomic_dec(&ctx->decrypt_pending);
249
Dave Watsonc46234e2018-03-22 10:10:35 -0700250 return ret;
251}
252
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200253static void tls_trim_both_msgs(struct sock *sk, int target_size)
Dave Watson3c4d7552017-06-14 11:37:39 -0700254{
255 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300256 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +0530257 struct tls_rec *rec = ctx->open_rec;
Dave Watson3c4d7552017-06-14 11:37:39 -0700258
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200259 sk_msg_trim(sk, &rec->msg_plaintext, target_size);
Dave Watson3c4d7552017-06-14 11:37:39 -0700260 if (target_size > 0)
Dave Watsondbe42552018-03-22 10:10:06 -0700261 target_size += tls_ctx->tx.overhead_size;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200262 sk_msg_trim(sk, &rec->msg_encrypted, target_size);
Dave Watson3c4d7552017-06-14 11:37:39 -0700263}
264
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200265static int tls_alloc_encrypted_msg(struct sock *sk, int len)
Dave Watson3c4d7552017-06-14 11:37:39 -0700266{
267 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300268 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +0530269 struct tls_rec *rec = ctx->open_rec;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200270 struct sk_msg *msg_en = &rec->msg_encrypted;
Dave Watson3c4d7552017-06-14 11:37:39 -0700271
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200272 return sk_msg_alloc(sk, msg_en, len, 0);
Dave Watson3c4d7552017-06-14 11:37:39 -0700273}
274
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200275static int tls_clone_plaintext_msg(struct sock *sk, int required)
Dave Watson3c4d7552017-06-14 11:37:39 -0700276{
277 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300278 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +0530279 struct tls_rec *rec = ctx->open_rec;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200280 struct sk_msg *msg_pl = &rec->msg_plaintext;
281 struct sk_msg *msg_en = &rec->msg_encrypted;
Vakul Garg4e6d4722018-09-30 08:04:35 +0530282 int skip, len;
Dave Watson3c4d7552017-06-14 11:37:39 -0700283
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200284 /* We add page references worth len bytes from encrypted sg
285 * at the end of plaintext sg. It is guaranteed that msg_en
Vakul Garg4e6d4722018-09-30 08:04:35 +0530286 * has the required room (ensured by the caller).
287 */
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200288 len = required - msg_pl->sg.size;
Vakul Garg52ea9922018-09-06 21:41:40 +0530289
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200290 /* Skip initial bytes in msg_en's data to be able to use
291 * same offset of both plain and encrypted data.
Vakul Garg4e6d4722018-09-30 08:04:35 +0530292 */
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200293 skip = tls_ctx->tx.prepend_size + msg_pl->sg.size;
Vakul Garg4e6d4722018-09-30 08:04:35 +0530294
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200295 return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
Dave Watson3c4d7552017-06-14 11:37:39 -0700296}
297
John Fastabendd3b18ad32018-10-13 02:46:01 +0200298static struct tls_rec *tls_get_rec(struct sock *sk)
299{
300 struct tls_context *tls_ctx = tls_get_ctx(sk);
301 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
302 struct sk_msg *msg_pl, *msg_en;
303 struct tls_rec *rec;
304 int mem_size;
305
306 mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
307
308 rec = kzalloc(mem_size, sk->sk_allocation);
309 if (!rec)
310 return NULL;
311
312 msg_pl = &rec->msg_plaintext;
313 msg_en = &rec->msg_encrypted;
314
315 sk_msg_init(msg_pl);
316 sk_msg_init(msg_en);
317
318 sg_init_table(rec->sg_aead_in, 2);
319 sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
Dave Watsona2ef9b62019-01-30 21:58:12 +0000320 tls_ctx->tx.aad_size);
John Fastabendd3b18ad32018-10-13 02:46:01 +0200321 sg_unmark_end(&rec->sg_aead_in[1]);
322
323 sg_init_table(rec->sg_aead_out, 2);
324 sg_set_buf(&rec->sg_aead_out[0], rec->aad_space,
Dave Watsona2ef9b62019-01-30 21:58:12 +0000325 tls_ctx->tx.aad_size);
John Fastabendd3b18ad32018-10-13 02:46:01 +0200326 sg_unmark_end(&rec->sg_aead_out[1]);
327
328 return rec;
329}
330
331static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
332{
333 sk_msg_free(sk, &rec->msg_encrypted);
334 sk_msg_free(sk, &rec->msg_plaintext);
335 kfree(rec);
336}
337
Vakul Gargc7749732018-09-25 20:21:51 +0530338static void tls_free_open_rec(struct sock *sk)
Dave Watson3c4d7552017-06-14 11:37:39 -0700339{
340 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300341 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +0530342 struct tls_rec *rec = ctx->open_rec;
Dave Watson3c4d7552017-06-14 11:37:39 -0700343
John Fastabendd3b18ad32018-10-13 02:46:01 +0200344 if (rec) {
345 tls_free_rec(sk, rec);
346 ctx->open_rec = NULL;
347 }
Dave Watson3c4d7552017-06-14 11:37:39 -0700348}
349
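/* Transmit encrypted records that are ready to go out: first finish any
 * partially sent record, then push every record on tx_list whose
 * encryption has completed, freeing each record after transmission.
 */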
Vakul Garga42055e2018-09-21 09:46:13 +0530350int tls_tx_records(struct sock *sk, int flags)
351{
352 struct tls_context *tls_ctx = tls_get_ctx(sk);
353 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
354 struct tls_rec *rec, *tmp;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200355 struct sk_msg *msg_en;
Vakul Garga42055e2018-09-21 09:46:13 +0530356 int tx_flags, rc = 0;
357
358 if (tls_is_partially_sent_record(tls_ctx)) {
Vakul Garg9932a292018-09-24 15:35:56 +0530359 rec = list_first_entry(&ctx->tx_list,
Vakul Garga42055e2018-09-21 09:46:13 +0530360 struct tls_rec, list);
361
362 if (flags == -1)
363 tx_flags = rec->tx_flags;
364 else
365 tx_flags = flags;
366
367 rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
368 if (rc)
369 goto tx_err;
370
371 /* Full record has been transmitted.
Vakul Garg9932a292018-09-24 15:35:56 +0530372 * Remove the head of tx_list
Vakul Garga42055e2018-09-21 09:46:13 +0530373 */
Vakul Garga42055e2018-09-21 09:46:13 +0530374 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200375 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +0530376 kfree(rec);
377 }
378
Vakul Garg9932a292018-09-24 15:35:56 +0530379 /* Tx all ready records */
380 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
381 if (READ_ONCE(rec->tx_ready)) {
Vakul Garga42055e2018-09-21 09:46:13 +0530382 if (flags == -1)
383 tx_flags = rec->tx_flags;
384 else
385 tx_flags = flags;
386
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200387 msg_en = &rec->msg_encrypted;
Vakul Garga42055e2018-09-21 09:46:13 +0530388 rc = tls_push_sg(sk, tls_ctx,
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200389 &msg_en->sg.data[msg_en->sg.curr],
Vakul Garga42055e2018-09-21 09:46:13 +0530390 0, tx_flags);
391 if (rc)
392 goto tx_err;
393
Vakul Garga42055e2018-09-21 09:46:13 +0530394 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200395 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +0530396 kfree(rec);
397 } else {
398 break;
399 }
400 }
401
402tx_err:
403 if (rc < 0 && rc != -EAGAIN)
404 tls_err_abort(sk, EBADMSG);
405
406 return rc;
407}
408
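/* Completion callback for asynchronous encryption. Restore the space
 * reserved for the TLS record header, propagate any error to the socket,
 * mark the record ready for transmission and, if it sits at the head of
 * tx_list, schedule the transmit worker.
 */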
409static void tls_encrypt_done(struct crypto_async_request *req, int err)
410{
411 struct aead_request *aead_req = (struct aead_request *)req;
412 struct sock *sk = req->data;
413 struct tls_context *tls_ctx = tls_get_ctx(sk);
414 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200415 struct scatterlist *sge;
416 struct sk_msg *msg_en;
Vakul Garga42055e2018-09-21 09:46:13 +0530417 struct tls_rec *rec;
418 bool ready = false;
419 int pending;
420
421 rec = container_of(aead_req, struct tls_rec, aead_req);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200422 msg_en = &rec->msg_encrypted;
Vakul Garga42055e2018-09-21 09:46:13 +0530423
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200424 sge = sk_msg_elem(msg_en, msg_en->sg.curr);
425 sge->offset -= tls_ctx->tx.prepend_size;
426 sge->length += tls_ctx->tx.prepend_size;
Vakul Garga42055e2018-09-21 09:46:13 +0530427
Vakul Garg80ece6a2018-09-26 16:22:08 +0530428 /* Check if an error was previously set on the socket */
Vakul Garga42055e2018-09-21 09:46:13 +0530429 if (err || sk->sk_err) {
Vakul Garga42055e2018-09-21 09:46:13 +0530430 rec = NULL;
431
432 /* If err is already set on socket, return the same code */
433 if (sk->sk_err) {
434 ctx->async_wait.err = sk->sk_err;
435 } else {
436 ctx->async_wait.err = err;
437 tls_err_abort(sk, err);
438 }
439 }
440
Vakul Garg9932a292018-09-24 15:35:56 +0530441 if (rec) {
442 struct tls_rec *first_rec;
443
444 /* Mark the record as ready for transmission */
445 smp_store_mb(rec->tx_ready, true);
446
 447 /* If the completed record is at the head of tx_list, schedule tx */
448 first_rec = list_first_entry(&ctx->tx_list,
449 struct tls_rec, list);
450 if (rec == first_rec)
451 ready = true;
452 }
Vakul Garga42055e2018-09-21 09:46:13 +0530453
454 pending = atomic_dec_return(&ctx->encrypt_pending);
455
456 if (!pending && READ_ONCE(ctx->async_notify))
457 complete(&ctx->async_wait.completion);
458
459 if (!ready)
460 return;
461
462 /* Schedule the transmission */
463 if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200464 schedule_delayed_work(&ctx->tx_work.work, 1);
Vakul Garga42055e2018-09-21 09:46:13 +0530465}
466
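/* Submit an AEAD encryption request for the open record. The per-record
 * IV is derived by XORing the sequence number into the base IV and the
 * record is queued on tx_list; unless encryption fails outright, the
 * record is unhooked from the context and the TX sequence number advanced.
 */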
467static int tls_do_encryption(struct sock *sk,
468 struct tls_context *tls_ctx,
Daniel Borkmanna447da72018-06-15 03:07:45 +0200469 struct tls_sw_context_tx *ctx,
470 struct aead_request *aead_req,
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200471 size_t data_len, u32 start)
Dave Watson3c4d7552017-06-14 11:37:39 -0700472{
Vakul Garga42055e2018-09-21 09:46:13 +0530473 struct tls_rec *rec = ctx->open_rec;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200474 struct sk_msg *msg_en = &rec->msg_encrypted;
475 struct scatterlist *sge = sk_msg_elem(msg_en, start);
Dave Watson3c4d7552017-06-14 11:37:39 -0700476 int rc;
477
Dave Watson32eb67b2019-01-27 00:57:38 +0000478 memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data));
Dave Watson130b3922019-01-30 21:58:31 +0000479 xor_iv_with_seq(tls_ctx->crypto_send.info.version, rec->iv_data,
480 tls_ctx->tx.rec_seq);
Dave Watson32eb67b2019-01-27 00:57:38 +0000481
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200482 sge->offset += tls_ctx->tx.prepend_size;
483 sge->length -= tls_ctx->tx.prepend_size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700484
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200485 msg_en->sg.curr = start;
Vakul Garg4e6d4722018-09-30 08:04:35 +0530486
Dave Watson3c4d7552017-06-14 11:37:39 -0700487 aead_request_set_tfm(aead_req, ctx->aead_send);
Dave Watsona2ef9b62019-01-30 21:58:12 +0000488 aead_request_set_ad(aead_req, tls_ctx->tx.aad_size);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200489 aead_request_set_crypt(aead_req, rec->sg_aead_in,
490 rec->sg_aead_out,
Dave Watson32eb67b2019-01-27 00:57:38 +0000491 data_len, rec->iv_data);
Vakul Garga54667f2018-01-31 21:34:37 +0530492
493 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
Vakul Garga42055e2018-09-21 09:46:13 +0530494 tls_encrypt_done, sk);
Vakul Garga54667f2018-01-31 21:34:37 +0530495
Vakul Garg9932a292018-09-24 15:35:56 +0530496 /* Add the record to tx_list */
497 list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
Vakul Garga42055e2018-09-21 09:46:13 +0530498 atomic_inc(&ctx->encrypt_pending);
Dave Watson3c4d7552017-06-14 11:37:39 -0700499
Vakul Garga42055e2018-09-21 09:46:13 +0530500 rc = crypto_aead_encrypt(aead_req);
501 if (!rc || rc != -EINPROGRESS) {
502 atomic_dec(&ctx->encrypt_pending);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200503 sge->offset -= tls_ctx->tx.prepend_size;
504 sge->length += tls_ctx->tx.prepend_size;
Vakul Garga42055e2018-09-21 09:46:13 +0530505 }
Dave Watson3c4d7552017-06-14 11:37:39 -0700506
Vakul Garg9932a292018-09-24 15:35:56 +0530507 if (!rc) {
508 WRITE_ONCE(rec->tx_ready, true);
509 } else if (rc != -EINPROGRESS) {
510 list_del(&rec->list);
Vakul Garga42055e2018-09-21 09:46:13 +0530511 return rc;
Vakul Garg9932a292018-09-24 15:35:56 +0530512 }
Vakul Garga42055e2018-09-21 09:46:13 +0530513
 514 /* Unhook the record from the context if encryption did not fail */
515 ctx->open_rec = NULL;
Dave Watson130b3922019-01-30 21:58:31 +0000516 tls_advance_record_sn(sk, &tls_ctx->tx,
517 tls_ctx->crypto_send.info.version);
Dave Watson3c4d7552017-06-14 11:37:39 -0700518 return rc;
519}
520
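/* Split the open record at 'split_point': the first split_point bytes of
 * plaintext stay in the original record, the remainder is moved into a
 * newly allocated record returned through 'to'. The original scatterlist
 * end is saved in 'orig_end' so the split can be undone later.
 */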
John Fastabendd3b18ad32018-10-13 02:46:01 +0200521static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
522 struct tls_rec **to, struct sk_msg *msg_opl,
523 struct sk_msg *msg_oen, u32 split_point,
524 u32 tx_overhead_size, u32 *orig_end)
525{
526 u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
527 struct scatterlist *sge, *osge, *nsge;
528 u32 orig_size = msg_opl->sg.size;
529 struct scatterlist tmp = { };
530 struct sk_msg *msg_npl;
531 struct tls_rec *new;
532 int ret;
533
534 new = tls_get_rec(sk);
535 if (!new)
536 return -ENOMEM;
537 ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
538 tx_overhead_size, 0);
539 if (ret < 0) {
540 tls_free_rec(sk, new);
541 return ret;
542 }
543
544 *orig_end = msg_opl->sg.end;
545 i = msg_opl->sg.start;
546 sge = sk_msg_elem(msg_opl, i);
547 while (apply && sge->length) {
548 if (sge->length > apply) {
549 u32 len = sge->length - apply;
550
551 get_page(sg_page(sge));
552 sg_set_page(&tmp, sg_page(sge), len,
553 sge->offset + apply);
554 sge->length = apply;
555 bytes += apply;
556 apply = 0;
557 } else {
558 apply -= sge->length;
559 bytes += sge->length;
560 }
561
562 sk_msg_iter_var_next(i);
563 if (i == msg_opl->sg.end)
564 break;
565 sge = sk_msg_elem(msg_opl, i);
566 }
567
568 msg_opl->sg.end = i;
569 msg_opl->sg.curr = i;
570 msg_opl->sg.copybreak = 0;
571 msg_opl->apply_bytes = 0;
572 msg_opl->sg.size = bytes;
573
574 msg_npl = &new->msg_plaintext;
575 msg_npl->apply_bytes = apply;
576 msg_npl->sg.size = orig_size - bytes;
577
578 j = msg_npl->sg.start;
579 nsge = sk_msg_elem(msg_npl, j);
580 if (tmp.length) {
581 memcpy(nsge, &tmp, sizeof(*nsge));
582 sk_msg_iter_var_next(j);
583 nsge = sk_msg_elem(msg_npl, j);
584 }
585
586 osge = sk_msg_elem(msg_opl, i);
587 while (osge->length) {
588 memcpy(nsge, osge, sizeof(*nsge));
589 sg_unmark_end(nsge);
590 sk_msg_iter_var_next(i);
591 sk_msg_iter_var_next(j);
592 if (i == *orig_end)
593 break;
594 osge = sk_msg_elem(msg_opl, i);
595 nsge = sk_msg_elem(msg_npl, j);
596 }
597
598 msg_npl->sg.end = j;
599 msg_npl->sg.curr = j;
600 msg_npl->sg.copybreak = 0;
601
602 *to = new;
603 return 0;
604}
605
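/* Undo a record split: move the plaintext of 'from' back onto the end of
 * 'to', merging the fragment at the split boundary when possible, and
 * take over 'from's encrypted buffer before freeing it.
 */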
606static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
607 struct tls_rec *from, u32 orig_end)
608{
609 struct sk_msg *msg_npl = &from->msg_plaintext;
610 struct sk_msg *msg_opl = &to->msg_plaintext;
611 struct scatterlist *osge, *nsge;
612 u32 i, j;
613
614 i = msg_opl->sg.end;
615 sk_msg_iter_var_prev(i);
616 j = msg_npl->sg.start;
617
618 osge = sk_msg_elem(msg_opl, i);
619 nsge = sk_msg_elem(msg_npl, j);
620
621 if (sg_page(osge) == sg_page(nsge) &&
622 osge->offset + osge->length == nsge->offset) {
623 osge->length += nsge->length;
624 put_page(sg_page(nsge));
625 }
626
627 msg_opl->sg.end = orig_end;
628 msg_opl->sg.curr = orig_end;
629 msg_opl->sg.copybreak = 0;
630 msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
631 msg_opl->sg.size += msg_npl->sg.size;
632
633 sk_msg_free(sk, &to->msg_encrypted);
634 sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
635
636 kfree(from);
637}
638
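/* Finish the currently open record: optionally split it at the apply
 * limit, chain the AAD, plaintext and (for TLS 1.3) content-type
 * scatterlists, write the record header, hand the record to
 * tls_do_encryption(), then transmit whatever records are ready.
 */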
Dave Watson3c4d7552017-06-14 11:37:39 -0700639static int tls_push_record(struct sock *sk, int flags,
640 unsigned char record_type)
641{
642 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300643 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +0200644 struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
645 u32 i, split_point, uninitialized_var(orig_end);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200646 struct sk_msg *msg_pl, *msg_en;
Daniel Borkmanna447da72018-06-15 03:07:45 +0200647 struct aead_request *req;
John Fastabendd3b18ad32018-10-13 02:46:01 +0200648 bool split;
Dave Watson3c4d7552017-06-14 11:37:39 -0700649 int rc;
650
Vakul Garga42055e2018-09-21 09:46:13 +0530651 if (!rec)
652 return 0;
Daniel Borkmanna447da72018-06-15 03:07:45 +0200653
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200654 msg_pl = &rec->msg_plaintext;
655 msg_en = &rec->msg_encrypted;
656
John Fastabendd3b18ad32018-10-13 02:46:01 +0200657 split_point = msg_pl->apply_bytes;
658 split = split_point && split_point < msg_pl->sg.size;
659 if (split) {
660 rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
661 split_point, tls_ctx->tx.overhead_size,
662 &orig_end);
663 if (rc < 0)
664 return rc;
665 sk_msg_trim(sk, msg_en, msg_pl->sg.size +
666 tls_ctx->tx.overhead_size);
667 }
668
Vakul Garga42055e2018-09-21 09:46:13 +0530669 rec->tx_flags = flags;
670 req = &rec->aead_req;
Dave Watson3c4d7552017-06-14 11:37:39 -0700671
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200672 i = msg_pl->sg.end;
673 sk_msg_iter_var_prev(i);
Dave Watson130b3922019-01-30 21:58:31 +0000674
675 rec->content_type = record_type;
676 if (tls_ctx->crypto_send.info.version == TLS_1_3_VERSION) {
677 /* Add content type to end of message. No padding added */
678 sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
679 sg_mark_end(&rec->sg_content_type);
680 sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
681 &rec->sg_content_type);
682 } else {
683 sg_mark_end(sk_msg_elem(msg_pl, i));
684 }
Vakul Garga42055e2018-09-21 09:46:13 +0530685
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200686 i = msg_pl->sg.start;
687 sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
688 &msg_en->sg.data[i] : &msg_pl->sg.data[i]);
689
690 i = msg_en->sg.end;
691 sk_msg_iter_var_prev(i);
692 sg_mark_end(sk_msg_elem(msg_en, i));
693
694 i = msg_en->sg.start;
695 sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
696
Dave Watson130b3922019-01-30 21:58:31 +0000697 tls_make_aad(rec->aad_space, msg_pl->sg.size + tls_ctx->tx.tail_size,
Dave Watsondbe42552018-03-22 10:10:06 -0700698 tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
Dave Watson130b3922019-01-30 21:58:31 +0000699 record_type,
700 tls_ctx->crypto_send.info.version);
Dave Watson3c4d7552017-06-14 11:37:39 -0700701
702 tls_fill_prepend(tls_ctx,
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200703 page_address(sg_page(&msg_en->sg.data[i])) +
Dave Watson130b3922019-01-30 21:58:31 +0000704 msg_en->sg.data[i].offset,
705 msg_pl->sg.size + tls_ctx->tx.tail_size,
706 record_type,
707 tls_ctx->crypto_send.info.version);
Dave Watson3c4d7552017-06-14 11:37:39 -0700708
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200709 tls_ctx->pending_open_record_frags = false;
Dave Watson3c4d7552017-06-14 11:37:39 -0700710
Dave Watson130b3922019-01-30 21:58:31 +0000711 rc = tls_do_encryption(sk, tls_ctx, ctx, req,
712 msg_pl->sg.size + tls_ctx->tx.tail_size, i);
Dave Watson3c4d7552017-06-14 11:37:39 -0700713 if (rc < 0) {
John Fastabendd3b18ad32018-10-13 02:46:01 +0200714 if (rc != -EINPROGRESS) {
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200715 tls_err_abort(sk, EBADMSG);
John Fastabendd3b18ad32018-10-13 02:46:01 +0200716 if (split) {
717 tls_ctx->pending_open_record_frags = true;
718 tls_merge_open_record(sk, rec, tmp, orig_end);
719 }
720 }
Dave Watson5b053e12019-01-30 22:08:21 +0000721 ctx->async_capable = 1;
Vakul Garga42055e2018-09-21 09:46:13 +0530722 return rc;
John Fastabendd3b18ad32018-10-13 02:46:01 +0200723 } else if (split) {
724 msg_pl = &tmp->msg_plaintext;
725 msg_en = &tmp->msg_encrypted;
726 sk_msg_trim(sk, msg_en, msg_pl->sg.size +
727 tls_ctx->tx.overhead_size);
728 tls_ctx->pending_open_record_frags = true;
729 ctx->open_rec = tmp;
Dave Watson3c4d7552017-06-14 11:37:39 -0700730 }
731
Vakul Garg9932a292018-09-24 15:35:56 +0530732 return tls_tx_records(sk, flags);
Dave Watson3c4d7552017-06-14 11:37:39 -0700733}
734
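/* Run the socket's BPF msg verdict program, if one is attached, over the
 * open record and act on the result: push the record out on this socket,
 * redirect it to another socket, or drop it, adjusting *copied for any
 * bytes that will never be transmitted.
 */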
John Fastabendd3b18ad32018-10-13 02:46:01 +0200735static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
736 bool full_record, u8 record_type,
737 size_t *copied, int flags)
Dave Watson3c4d7552017-06-14 11:37:39 -0700738{
739 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300740 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +0200741 struct sk_msg msg_redir = { };
742 struct sk_psock *psock;
743 struct sock *sk_redir;
Vakul Garga42055e2018-09-21 09:46:13 +0530744 struct tls_rec *rec;
John Fastabend0608c692018-12-20 11:35:35 -0800745 bool enospc, policy;
John Fastabendd3b18ad32018-10-13 02:46:01 +0200746 int err = 0, send;
John Fastabend7246d8e2018-11-26 14:16:17 -0800747 u32 delta = 0;
Vakul Garga42055e2018-09-21 09:46:13 +0530748
John Fastabend0608c692018-12-20 11:35:35 -0800749 policy = !(flags & MSG_SENDPAGE_NOPOLICY);
John Fastabendd3b18ad32018-10-13 02:46:01 +0200750 psock = sk_psock_get(sk);
John Fastabend0608c692018-12-20 11:35:35 -0800751 if (!psock || !policy)
John Fastabendd3b18ad32018-10-13 02:46:01 +0200752 return tls_push_record(sk, flags, record_type);
753more_data:
754 enospc = sk_msg_full(msg);
John Fastabend7246d8e2018-11-26 14:16:17 -0800755 if (psock->eval == __SK_NONE) {
756 delta = msg->sg.size;
John Fastabendd3b18ad32018-10-13 02:46:01 +0200757 psock->eval = sk_psock_msg_verdict(sk, psock, msg);
John Fastabend7246d8e2018-11-26 14:16:17 -0800758 if (delta < msg->sg.size)
759 delta -= msg->sg.size;
760 else
761 delta = 0;
762 }
John Fastabendd3b18ad32018-10-13 02:46:01 +0200763 if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
764 !enospc && !full_record) {
765 err = -ENOSPC;
766 goto out_err;
767 }
768 msg->cork_bytes = 0;
769 send = msg->sg.size;
770 if (msg->apply_bytes && msg->apply_bytes < send)
771 send = msg->apply_bytes;
Vakul Garga42055e2018-09-21 09:46:13 +0530772
John Fastabendd3b18ad32018-10-13 02:46:01 +0200773 switch (psock->eval) {
774 case __SK_PASS:
775 err = tls_push_record(sk, flags, record_type);
776 if (err < 0) {
777 *copied -= sk_msg_free(sk, msg);
778 tls_free_open_rec(sk);
779 goto out_err;
780 }
781 break;
782 case __SK_REDIRECT:
783 sk_redir = psock->sk_redir;
784 memcpy(&msg_redir, msg, sizeof(*msg));
785 if (msg->apply_bytes < send)
786 msg->apply_bytes = 0;
787 else
788 msg->apply_bytes -= send;
789 sk_msg_return_zero(sk, msg, send);
790 msg->sg.size -= send;
791 release_sock(sk);
792 err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
793 lock_sock(sk);
794 if (err < 0) {
795 *copied -= sk_msg_free_nocharge(sk, &msg_redir);
796 msg->sg.size = 0;
797 }
798 if (msg->sg.size == 0)
799 tls_free_open_rec(sk);
800 break;
801 case __SK_DROP:
802 default:
803 sk_msg_free_partial(sk, msg, send);
804 if (msg->apply_bytes < send)
805 msg->apply_bytes = 0;
806 else
807 msg->apply_bytes -= send;
808 if (msg->sg.size == 0)
809 tls_free_open_rec(sk);
John Fastabend7246d8e2018-11-26 14:16:17 -0800810 *copied -= (send + delta);
John Fastabendd3b18ad32018-10-13 02:46:01 +0200811 err = -EACCES;
812 }
Vakul Garga42055e2018-09-21 09:46:13 +0530813
John Fastabendd3b18ad32018-10-13 02:46:01 +0200814 if (likely(!err)) {
815 bool reset_eval = !ctx->open_rec;
816
817 rec = ctx->open_rec;
818 if (rec) {
819 msg = &rec->msg_plaintext;
820 if (!msg->apply_bytes)
821 reset_eval = true;
822 }
823 if (reset_eval) {
824 psock->eval = __SK_NONE;
825 if (psock->sk_redir) {
826 sock_put(psock->sk_redir);
827 psock->sk_redir = NULL;
828 }
829 }
830 if (rec)
831 goto more_data;
832 }
833 out_err:
834 sk_psock_put(sk, psock);
835 return err;
836}
837
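/* Push out the currently open record, if any, routing it through the
 * BPF TX verdict path like a normal sendmsg would.
 */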
838static int tls_sw_push_pending_record(struct sock *sk, int flags)
839{
840 struct tls_context *tls_ctx = tls_get_ctx(sk);
841 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
842 struct tls_rec *rec = ctx->open_rec;
843 struct sk_msg *msg_pl;
844 size_t copied;
845
Vakul Garga42055e2018-09-21 09:46:13 +0530846 if (!rec)
John Fastabendd3b18ad32018-10-13 02:46:01 +0200847 return 0;
Vakul Garga42055e2018-09-21 09:46:13 +0530848
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200849 msg_pl = &rec->msg_plaintext;
John Fastabendd3b18ad32018-10-13 02:46:01 +0200850 copied = msg_pl->sg.size;
851 if (!copied)
852 return 0;
Vakul Garga42055e2018-09-21 09:46:13 +0530853
John Fastabendd3b18ad32018-10-13 02:46:01 +0200854 return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
855 &copied, flags);
Vakul Garga42055e2018-09-21 09:46:13 +0530856}
857
858int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
859{
Dave Watson3c4d7552017-06-14 11:37:39 -0700860 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
Vakul Garga42055e2018-09-21 09:46:13 +0530861 struct tls_context *tls_ctx = tls_get_ctx(sk);
862 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Dave Watson5b053e12019-01-30 22:08:21 +0000863 bool async_capable = ctx->async_capable;
Vakul Garga42055e2018-09-21 09:46:13 +0530864 unsigned char record_type = TLS_RECORD_TYPE_DATA;
David Howells00e23702018-10-22 13:07:28 +0100865 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
Dave Watson3c4d7552017-06-14 11:37:39 -0700866 bool eor = !(msg->msg_flags & MSG_MORE);
867 size_t try_to_copy, copied = 0;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200868 struct sk_msg *msg_pl, *msg_en;
Vakul Garga42055e2018-09-21 09:46:13 +0530869 struct tls_rec *rec;
870 int required_size;
871 int num_async = 0;
Dave Watson3c4d7552017-06-14 11:37:39 -0700872 bool full_record;
Vakul Garga42055e2018-09-21 09:46:13 +0530873 int record_room;
874 int num_zc = 0;
Dave Watson3c4d7552017-06-14 11:37:39 -0700875 int orig_size;
Vakul Garg4128c0c2018-09-24 16:09:49 +0530876 int ret = 0;
Dave Watson3c4d7552017-06-14 11:37:39 -0700877
878 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
879 return -ENOTSUPP;
880
881 lock_sock(sk);
882
Vakul Garga42055e2018-09-21 09:46:13 +0530883 /* Wait for any pending writes on the socket to complete */
884 if (unlikely(sk->sk_write_pending)) {
885 ret = wait_on_pending_writer(sk, &timeo);
886 if (unlikely(ret))
887 goto send_end;
888 }
Dave Watson3c4d7552017-06-14 11:37:39 -0700889
890 if (unlikely(msg->msg_controllen)) {
891 ret = tls_proccess_cmsg(sk, msg, &record_type);
Vakul Garga42055e2018-09-21 09:46:13 +0530892 if (ret) {
893 if (ret == -EINPROGRESS)
894 num_async++;
895 else if (ret != -EAGAIN)
896 goto send_end;
897 }
Dave Watson3c4d7552017-06-14 11:37:39 -0700898 }
899
900 while (msg_data_left(msg)) {
901 if (sk->sk_err) {
r.hering@avm.de30be8f82018-01-12 15:42:06 +0100902 ret = -sk->sk_err;
Dave Watson3c4d7552017-06-14 11:37:39 -0700903 goto send_end;
904 }
905
John Fastabendd3b18ad32018-10-13 02:46:01 +0200906 if (ctx->open_rec)
907 rec = ctx->open_rec;
908 else
909 rec = ctx->open_rec = tls_get_rec(sk);
Vakul Garga42055e2018-09-21 09:46:13 +0530910 if (!rec) {
911 ret = -ENOMEM;
912 goto send_end;
913 }
914
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200915 msg_pl = &rec->msg_plaintext;
916 msg_en = &rec->msg_encrypted;
917
918 orig_size = msg_pl->sg.size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700919 full_record = false;
920 try_to_copy = msg_data_left(msg);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200921 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700922 if (try_to_copy >= record_room) {
923 try_to_copy = record_room;
924 full_record = true;
925 }
926
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200927 required_size = msg_pl->sg.size + try_to_copy +
Dave Watsondbe42552018-03-22 10:10:06 -0700928 tls_ctx->tx.overhead_size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700929
930 if (!sk_stream_memory_free(sk))
931 goto wait_for_sndbuf;
Vakul Garga42055e2018-09-21 09:46:13 +0530932
Dave Watson3c4d7552017-06-14 11:37:39 -0700933alloc_encrypted:
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200934 ret = tls_alloc_encrypted_msg(sk, required_size);
Dave Watson3c4d7552017-06-14 11:37:39 -0700935 if (ret) {
936 if (ret != -ENOSPC)
937 goto wait_for_memory;
938
939 /* Adjust try_to_copy according to the amount that was
940 * actually allocated. The difference is due
941 * to max sg elements limit
942 */
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200943 try_to_copy -= required_size - msg_en->sg.size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700944 full_record = true;
945 }
Vakul Garga42055e2018-09-21 09:46:13 +0530946
947 if (!is_kvec && (full_record || eor) && !async_capable) {
John Fastabendd3b18ad32018-10-13 02:46:01 +0200948 u32 first = msg_pl->sg.end;
949
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200950 ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
951 msg_pl, try_to_copy);
Dave Watson3c4d7552017-06-14 11:37:39 -0700952 if (ret)
953 goto fallback_to_reg_send;
954
Vakul Garg4e6d4722018-09-30 08:04:35 +0530955 rec->inplace_crypto = 0;
956
Vakul Garga42055e2018-09-21 09:46:13 +0530957 num_zc++;
Dave Watson3c4d7552017-06-14 11:37:39 -0700958 copied += try_to_copy;
John Fastabendd3b18ad32018-10-13 02:46:01 +0200959
960 sk_msg_sg_copy_set(msg_pl, first);
961 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
962 record_type, &copied,
963 msg->msg_flags);
Vakul Garga42055e2018-09-21 09:46:13 +0530964 if (ret) {
965 if (ret == -EINPROGRESS)
966 num_async++;
John Fastabendd3b18ad32018-10-13 02:46:01 +0200967 else if (ret == -ENOMEM)
968 goto wait_for_memory;
969 else if (ret == -ENOSPC)
970 goto rollback_iter;
Vakul Garga42055e2018-09-21 09:46:13 +0530971 else if (ret != -EAGAIN)
972 goto send_end;
973 }
Doron Roberts-Kedes5a3611e2018-07-26 07:59:35 -0700974 continue;
John Fastabendd3b18ad32018-10-13 02:46:01 +0200975rollback_iter:
976 copied -= try_to_copy;
977 sk_msg_sg_copy_clear(msg_pl, first);
978 iov_iter_revert(&msg->msg_iter,
979 msg_pl->sg.size - orig_size);
Dave Watson3c4d7552017-06-14 11:37:39 -0700980fallback_to_reg_send:
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200981 sk_msg_trim(sk, msg_pl, orig_size);
Dave Watson3c4d7552017-06-14 11:37:39 -0700982 }
983
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200984 required_size = msg_pl->sg.size + try_to_copy;
Vakul Garg4e6d4722018-09-30 08:04:35 +0530985
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200986 ret = tls_clone_plaintext_msg(sk, required_size);
Dave Watson3c4d7552017-06-14 11:37:39 -0700987 if (ret) {
988 if (ret != -ENOSPC)
Vakul Garg4e6d4722018-09-30 08:04:35 +0530989 goto send_end;
Dave Watson3c4d7552017-06-14 11:37:39 -0700990
991 /* Adjust try_to_copy according to the amount that was
992 * actually allocated. The difference is due
993 * to max sg elements limit
994 */
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200995 try_to_copy -= required_size - msg_pl->sg.size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700996 full_record = true;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200997 sk_msg_trim(sk, msg_en, msg_pl->sg.size +
998 tls_ctx->tx.overhead_size);
Dave Watson3c4d7552017-06-14 11:37:39 -0700999 }
1000
Vakul Garg65a10e22018-12-21 15:16:52 +00001001 if (try_to_copy) {
1002 ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
1003 msg_pl, try_to_copy);
1004 if (ret < 0)
1005 goto trim_sgl;
1006 }
Dave Watson3c4d7552017-06-14 11:37:39 -07001007
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001008 /* Mark the open record's frags as pending only if the copy succeeded,
 1009 * otherwise we would trim the sg but not reset the open record frags.
1010 */
1011 tls_ctx->pending_open_record_frags = true;
Dave Watson3c4d7552017-06-14 11:37:39 -07001012 copied += try_to_copy;
1013 if (full_record || eor) {
John Fastabendd3b18ad32018-10-13 02:46:01 +02001014 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1015 record_type, &copied,
1016 msg->msg_flags);
Dave Watson3c4d7552017-06-14 11:37:39 -07001017 if (ret) {
Vakul Garga42055e2018-09-21 09:46:13 +05301018 if (ret == -EINPROGRESS)
1019 num_async++;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001020 else if (ret == -ENOMEM)
1021 goto wait_for_memory;
1022 else if (ret != -EAGAIN) {
1023 if (ret == -ENOSPC)
1024 ret = 0;
Vakul Garga42055e2018-09-21 09:46:13 +05301025 goto send_end;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001026 }
Dave Watson3c4d7552017-06-14 11:37:39 -07001027 }
1028 }
1029
1030 continue;
1031
1032wait_for_sndbuf:
1033 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1034wait_for_memory:
1035 ret = sk_stream_wait_memory(sk, &timeo);
1036 if (ret) {
1037trim_sgl:
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001038 tls_trim_both_msgs(sk, orig_size);
Dave Watson3c4d7552017-06-14 11:37:39 -07001039 goto send_end;
1040 }
1041
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001042 if (msg_en->sg.size < required_size)
Dave Watson3c4d7552017-06-14 11:37:39 -07001043 goto alloc_encrypted;
Dave Watson3c4d7552017-06-14 11:37:39 -07001044 }
1045
Vakul Garga42055e2018-09-21 09:46:13 +05301046 if (!num_async) {
1047 goto send_end;
1048 } else if (num_zc) {
 1049 /* Wait for pending encryptions to complete */
1050 smp_store_mb(ctx->async_notify, true);
1051
1052 if (atomic_read(&ctx->encrypt_pending))
1053 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1054 else
1055 reinit_completion(&ctx->async_wait.completion);
1056
1057 WRITE_ONCE(ctx->async_notify, false);
1058
1059 if (ctx->async_wait.err) {
1060 ret = ctx->async_wait.err;
1061 copied = 0;
1062 }
1063 }
1064
1065 /* Transmit if any encryptions have completed */
1066 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1067 cancel_delayed_work(&ctx->tx_work.work);
1068 tls_tx_records(sk, msg->msg_flags);
1069 }
1070
Dave Watson3c4d7552017-06-14 11:37:39 -07001071send_end:
1072 ret = sk_stream_error(sk, msg->msg_flags, ret);
1073
1074 release_sock(sk);
1075 return copied ? copied : ret;
1076}
1077
YueHaibing01cb8a12019-01-16 10:39:28 +08001078static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
1079 int offset, size_t size, int flags)
Dave Watson3c4d7552017-06-14 11:37:39 -07001080{
Vakul Garga42055e2018-09-21 09:46:13 +05301081 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
Dave Watson3c4d7552017-06-14 11:37:39 -07001082 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001083 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Dave Watson3c4d7552017-06-14 11:37:39 -07001084 unsigned char record_type = TLS_RECORD_TYPE_DATA;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001085 struct sk_msg *msg_pl;
Vakul Garga42055e2018-09-21 09:46:13 +05301086 struct tls_rec *rec;
1087 int num_async = 0;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001088 size_t copied = 0;
Dave Watson3c4d7552017-06-14 11:37:39 -07001089 bool full_record;
1090 int record_room;
Vakul Garg4128c0c2018-09-24 16:09:49 +05301091 int ret = 0;
Vakul Garga42055e2018-09-21 09:46:13 +05301092 bool eor;
Dave Watson3c4d7552017-06-14 11:37:39 -07001093
Dave Watson3c4d7552017-06-14 11:37:39 -07001094 eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
Dave Watson3c4d7552017-06-14 11:37:39 -07001095 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1096
Vakul Garga42055e2018-09-21 09:46:13 +05301097 /* Wait for any pending writes on the socket to complete */
1098 if (unlikely(sk->sk_write_pending)) {
1099 ret = wait_on_pending_writer(sk, &timeo);
1100 if (unlikely(ret))
1101 goto sendpage_end;
1102 }
Dave Watson3c4d7552017-06-14 11:37:39 -07001103
1104 /* Call the sk_stream functions to manage the sndbuf mem. */
1105 while (size > 0) {
1106 size_t copy, required_size;
1107
1108 if (sk->sk_err) {
r.hering@avm.de30be8f82018-01-12 15:42:06 +01001109 ret = -sk->sk_err;
Dave Watson3c4d7552017-06-14 11:37:39 -07001110 goto sendpage_end;
1111 }
1112
John Fastabendd3b18ad32018-10-13 02:46:01 +02001113 if (ctx->open_rec)
1114 rec = ctx->open_rec;
1115 else
1116 rec = ctx->open_rec = tls_get_rec(sk);
Vakul Garga42055e2018-09-21 09:46:13 +05301117 if (!rec) {
1118 ret = -ENOMEM;
1119 goto sendpage_end;
1120 }
1121
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001122 msg_pl = &rec->msg_plaintext;
1123
Dave Watson3c4d7552017-06-14 11:37:39 -07001124 full_record = false;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001125 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001126 copied = 0;
Dave Watson3c4d7552017-06-14 11:37:39 -07001127 copy = size;
1128 if (copy >= record_room) {
1129 copy = record_room;
1130 full_record = true;
1131 }
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001132
1133 required_size = msg_pl->sg.size + copy +
1134 tls_ctx->tx.overhead_size;
Dave Watson3c4d7552017-06-14 11:37:39 -07001135
1136 if (!sk_stream_memory_free(sk))
1137 goto wait_for_sndbuf;
1138alloc_payload:
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001139 ret = tls_alloc_encrypted_msg(sk, required_size);
Dave Watson3c4d7552017-06-14 11:37:39 -07001140 if (ret) {
1141 if (ret != -ENOSPC)
1142 goto wait_for_memory;
1143
1144 /* Adjust copy according to the amount that was
1145 * actually allocated. The difference is due
1146 * to max sg elements limit
1147 */
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001148 copy -= required_size - msg_pl->sg.size;
Dave Watson3c4d7552017-06-14 11:37:39 -07001149 full_record = true;
1150 }
1151
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001152 sk_msg_page_add(msg_pl, page, copy, offset);
Dave Watson3c4d7552017-06-14 11:37:39 -07001153 sk_mem_charge(sk, copy);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001154
Dave Watson3c4d7552017-06-14 11:37:39 -07001155 offset += copy;
1156 size -= copy;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001157 copied += copy;
Dave Watson3c4d7552017-06-14 11:37:39 -07001158
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001159 tls_ctx->pending_open_record_frags = true;
1160 if (full_record || eor || sk_msg_full(msg_pl)) {
Vakul Garg4e6d4722018-09-30 08:04:35 +05301161 rec->inplace_crypto = 0;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001162 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1163 record_type, &copied, flags);
Dave Watson3c4d7552017-06-14 11:37:39 -07001164 if (ret) {
Vakul Garga42055e2018-09-21 09:46:13 +05301165 if (ret == -EINPROGRESS)
1166 num_async++;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001167 else if (ret == -ENOMEM)
1168 goto wait_for_memory;
1169 else if (ret != -EAGAIN) {
1170 if (ret == -ENOSPC)
1171 ret = 0;
Vakul Garga42055e2018-09-21 09:46:13 +05301172 goto sendpage_end;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001173 }
Dave Watson3c4d7552017-06-14 11:37:39 -07001174 }
1175 }
1176 continue;
1177wait_for_sndbuf:
1178 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1179wait_for_memory:
1180 ret = sk_stream_wait_memory(sk, &timeo);
1181 if (ret) {
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001182 tls_trim_both_msgs(sk, msg_pl->sg.size);
Dave Watson3c4d7552017-06-14 11:37:39 -07001183 goto sendpage_end;
1184 }
1185
Dave Watson3c4d7552017-06-14 11:37:39 -07001186 goto alloc_payload;
1187 }
1188
Vakul Garga42055e2018-09-21 09:46:13 +05301189 if (num_async) {
1190 /* Transmit if any encryptions have completed */
1191 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1192 cancel_delayed_work(&ctx->tx_work.work);
1193 tls_tx_records(sk, flags);
1194 }
1195 }
Dave Watson3c4d7552017-06-14 11:37:39 -07001196sendpage_end:
John Fastabendd3b18ad32018-10-13 02:46:01 +02001197 ret = sk_stream_error(sk, flags, ret);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001198 return copied ? copied : ret;
Dave Watson3c4d7552017-06-14 11:37:39 -07001199}
1200
John Fastabend0608c692018-12-20 11:35:35 -08001201int tls_sw_sendpage(struct sock *sk, struct page *page,
1202 int offset, size_t size, int flags)
1203{
1204 int ret;
1205
1206 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1207 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1208 return -ENOTSUPP;
1209
1210 lock_sock(sk);
1211 ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
1212 release_sock(sk);
1213 return ret;
1214}
1215
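/* Wait until the strparser has a parsed record available (or the psock
 * ingress queue has data), honouring the receive timeout, socket errors,
 * shutdown and pending signals.
 */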
John Fastabendd3b18ad32018-10-13 02:46:01 +02001216static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
1217 int flags, long timeo, int *err)
Dave Watsonc46234e2018-03-22 10:10:35 -07001218{
1219 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001220 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001221 struct sk_buff *skb;
1222 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1223
John Fastabendd3b18ad32018-10-13 02:46:01 +02001224 while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001225 if (sk->sk_err) {
1226 *err = sock_error(sk);
1227 return NULL;
1228 }
1229
Doron Roberts-Kedesfcf47932018-07-18 16:22:27 -07001230 if (sk->sk_shutdown & RCV_SHUTDOWN)
1231 return NULL;
1232
Dave Watsonc46234e2018-03-22 10:10:35 -07001233 if (sock_flag(sk, SOCK_DONE))
1234 return NULL;
1235
1236 if ((flags & MSG_DONTWAIT) || !timeo) {
1237 *err = -EAGAIN;
1238 return NULL;
1239 }
1240
1241 add_wait_queue(sk_sleep(sk), &wait);
1242 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001243 sk_wait_event(sk, &timeo,
1244 ctx->recv_pkt != skb ||
1245 !sk_psock_queue_empty(psock),
1246 &wait);
Dave Watsonc46234e2018-03-22 10:10:35 -07001247 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1248 remove_wait_queue(sk_sleep(sk), &wait);
1249
1250 /* Handle signals */
1251 if (signal_pending(current)) {
1252 *err = sock_intr_errno(timeo);
1253 return NULL;
1254 }
1255 }
1256
1257 return skb;
1258}
1259
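/* Pin the user pages backing 'from' and add them to the scatterlist 'to'
 * so a record can be decrypted directly into user memory; on failure the
 * iterator is rewound to its original position.
 */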
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001260static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
1261 int length, int *pages_used,
1262 unsigned int *size_used,
1263 struct scatterlist *to,
1264 int to_max_pages)
1265{
1266 int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1267 struct page *pages[MAX_SKB_FRAGS];
1268 unsigned int size = *size_used;
1269 ssize_t copied, use;
1270 size_t offset;
1271
1272 while (length > 0) {
1273 i = 0;
1274 maxpages = to_max_pages - num_elem;
1275 if (maxpages == 0) {
1276 rc = -EFAULT;
1277 goto out;
1278 }
1279 copied = iov_iter_get_pages(from, pages,
1280 length,
1281 maxpages, &offset);
1282 if (copied <= 0) {
1283 rc = -EFAULT;
1284 goto out;
1285 }
1286
1287 iov_iter_advance(from, copied);
1288
1289 length -= copied;
1290 size += copied;
1291 while (copied) {
1292 use = min_t(int, copied, PAGE_SIZE - offset);
1293
1294 sg_set_page(&to[num_elem],
1295 pages[i], use, offset);
1296 sg_unmark_end(&to[num_elem]);
1297 /* We do not uncharge memory from this API */
1298
1299 offset = 0;
1300 copied -= use;
1301
1302 i++;
1303 num_elem++;
1304 }
1305 }
1306 /* Mark the end in the last sg entry if newly added */
1307 if (num_elem > *pages_used)
1308 sg_mark_end(&to[num_elem - 1]);
1309out:
1310 if (rc)
1311 iov_iter_revert(from, size - *size_used);
1312 *size_used = size;
1313 *pages_used = num_elem;
1314
1315 return rc;
1316}
1317
Vakul Garg0b243d02018-08-10 20:46:41 +05301318/* This function decrypts the input skb into either out_iov or out_sg,
 1319 * or into the skb buffers themselves. The input parameter 'zc' indicates
 1320 * whether zero-copy mode should be tried. In zero-copy mode, either
 1321 * out_iov or out_sg must be non-NULL. If both out_iov and out_sg are
 1322 * NULL, the decryption happens inside the skb buffers themselves, i.e.
 1323 * zero-copy gets disabled and 'zc' is updated.
1324 */
1325
1326static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1327 struct iov_iter *out_iov,
1328 struct scatterlist *out_sg,
Vakul Garg692d7b52019-01-16 10:40:16 +00001329 int *chunk, bool *zc, bool async)
Vakul Garg0b243d02018-08-10 20:46:41 +05301330{
1331 struct tls_context *tls_ctx = tls_get_ctx(sk);
1332 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1333 struct strp_msg *rxm = strp_msg(skb);
1334 int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
1335 struct aead_request *aead_req;
1336 struct sk_buff *unused;
1337 u8 *aad, *iv, *mem = NULL;
1338 struct scatterlist *sgin = NULL;
1339 struct scatterlist *sgout = NULL;
Dave Watson130b3922019-01-30 21:58:31 +00001340 const int data_len = rxm->full_len - tls_ctx->rx.overhead_size +
1341 tls_ctx->rx.tail_size;
Vakul Garg0b243d02018-08-10 20:46:41 +05301342
1343 if (*zc && (out_iov || out_sg)) {
1344 if (out_iov)
1345 n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
1346 else
1347 n_sgout = sg_nents(out_sg);
Doron Roberts-Kedes0927f712018-08-28 16:33:57 -07001348 n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size,
1349 rxm->full_len - tls_ctx->rx.prepend_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301350 } else {
1351 n_sgout = 0;
1352 *zc = false;
Doron Roberts-Kedes0927f712018-08-28 16:33:57 -07001353 n_sgin = skb_cow_data(skb, 0, &unused);
Vakul Garg0b243d02018-08-10 20:46:41 +05301354 }
1355
Vakul Garg0b243d02018-08-10 20:46:41 +05301356 if (n_sgin < 1)
1357 return -EBADMSG;
1358
1359 /* Increment to accommodate AAD */
1360 n_sgin = n_sgin + 1;
1361
1362 nsg = n_sgin + n_sgout;
1363
1364 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1365 mem_size = aead_size + (nsg * sizeof(struct scatterlist));
Dave Watsona2ef9b62019-01-30 21:58:12 +00001366 mem_size = mem_size + tls_ctx->rx.aad_size;
Vakul Garg0b243d02018-08-10 20:46:41 +05301367 mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
1368
1369 /* Allocate a single block of memory which contains
1370 * aead_req || sgin[] || sgout[] || aad || iv.
1371 * This order achieves correct alignment for aead_req, sgin, sgout.
1372 */
1373 mem = kmalloc(mem_size, sk->sk_allocation);
1374 if (!mem)
1375 return -ENOMEM;
1376
1377 /* Segment the allocated memory */
1378 aead_req = (struct aead_request *)mem;
1379 sgin = (struct scatterlist *)(mem + aead_size);
1380 sgout = sgin + n_sgin;
1381 aad = (u8 *)(sgout + n_sgout);
Dave Watsona2ef9b62019-01-30 21:58:12 +00001382 iv = aad + tls_ctx->rx.aad_size;
Vakul Garg0b243d02018-08-10 20:46:41 +05301383
1384 /* Prepare IV */
1385 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1386 iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
1387 tls_ctx->rx.iv_size);
1388 if (err < 0) {
1389 kfree(mem);
1390 return err;
1391 }
Dave Watson130b3922019-01-30 21:58:31 +00001392 if (tls_ctx->crypto_recv.info.version == TLS_1_3_VERSION)
1393 memcpy(iv, tls_ctx->rx.iv, crypto_aead_ivsize(ctx->aead_recv));
1394 else
1395 memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
1396
1397 xor_iv_with_seq(tls_ctx->crypto_recv.info.version, iv,
1398 tls_ctx->rx.rec_seq);
Vakul Garg0b243d02018-08-10 20:46:41 +05301399
1400 /* Prepare AAD */
Dave Watson130b3922019-01-30 21:58:31 +00001401 tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size +
1402 tls_ctx->rx.tail_size,
Vakul Garg0b243d02018-08-10 20:46:41 +05301403 tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
Dave Watson130b3922019-01-30 21:58:31 +00001404 ctx->control,
1405 tls_ctx->crypto_recv.info.version);
Vakul Garg0b243d02018-08-10 20:46:41 +05301406
1407 /* Prepare sgin */
1408 sg_init_table(sgin, n_sgin);
Dave Watsona2ef9b62019-01-30 21:58:12 +00001409 sg_set_buf(&sgin[0], aad, tls_ctx->rx.aad_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301410 err = skb_to_sgvec(skb, &sgin[1],
1411 rxm->offset + tls_ctx->rx.prepend_size,
1412 rxm->full_len - tls_ctx->rx.prepend_size);
1413 if (err < 0) {
1414 kfree(mem);
1415 return err;
1416 }
1417
1418 if (n_sgout) {
1419 if (out_iov) {
1420 sg_init_table(sgout, n_sgout);
Dave Watsona2ef9b62019-01-30 21:58:12 +00001421 sg_set_buf(&sgout[0], aad, tls_ctx->rx.aad_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301422
1423 *chunk = 0;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001424 err = tls_setup_from_iter(sk, out_iov, data_len,
1425 &pages, chunk, &sgout[1],
1426 (n_sgout - 1));
Vakul Garg0b243d02018-08-10 20:46:41 +05301427 if (err < 0)
1428 goto fallback_to_reg_recv;
1429 } else if (out_sg) {
1430 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1431 } else {
1432 goto fallback_to_reg_recv;
1433 }
1434 } else {
1435fallback_to_reg_recv:
1436 sgout = sgin;
1437 pages = 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001438 *chunk = data_len;
Vakul Garg0b243d02018-08-10 20:46:41 +05301439 *zc = false;
1440 }
1441
1442 /* Prepare and submit AEAD request */
Vakul Garg94524d82018-08-29 15:26:55 +05301443 err = tls_do_decryption(sk, skb, sgin, sgout, iv,
Vakul Garg692d7b52019-01-16 10:40:16 +00001444 data_len, aead_req, async);
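	/* If the cipher runs asynchronously (-EINPROGRESS), the completion
	 * callback releases the mapped pages and frees this buffer, so do not
	 * touch them here.
	 */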
Vakul Garg94524d82018-08-29 15:26:55 +05301445 if (err == -EINPROGRESS)
1446 return err;
Vakul Garg0b243d02018-08-10 20:46:41 +05301447
1448 /* Release the pages in case iov was mapped to pages */
1449 for (; pages > 0; pages--)
1450 put_page(sg_page(&sgout[pages]));
1451
1452 kfree(mem);
1453 return err;
1454}
1455
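/* Decrypt the record held in 'skb' unless device offload has already done so.
 * On synchronous success the record boundaries are trimmed to the plaintext,
 * the receive sequence number is advanced and the record is marked decrypted;
 * -EINPROGRESS means an asynchronous decryption is still in flight.
 */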
Boris Pismennydafb67f2018-07-13 14:33:40 +03001456static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
Vakul Garg692d7b52019-01-16 10:40:16 +00001457 struct iov_iter *dest, int *chunk, bool *zc,
1458 bool async)
Boris Pismennydafb67f2018-07-13 14:33:40 +03001459{
1460 struct tls_context *tls_ctx = tls_get_ctx(sk);
1461 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watson130b3922019-01-30 21:58:31 +00001462 int version = tls_ctx->crypto_recv.info.version;
Boris Pismennydafb67f2018-07-13 14:33:40 +03001463 struct strp_msg *rxm = strp_msg(skb);
1464 int err = 0;
1465
Boris Pismenny4799ac82018-07-13 14:33:43 +03001466#ifdef CONFIG_TLS_DEVICE
1467 err = tls_device_decrypted(sk, skb);
Boris Pismennydafb67f2018-07-13 14:33:40 +03001468 if (err < 0)
1469 return err;
Boris Pismenny4799ac82018-07-13 14:33:43 +03001470#endif
1471 if (!ctx->decrypted) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001472 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc, async);
Vakul Garg94524d82018-08-29 15:26:55 +05301473 if (err < 0) {
1474 if (err == -EINPROGRESS)
Dave Watson130b3922019-01-30 21:58:31 +00001475 tls_advance_record_sn(sk, &tls_ctx->rx,
1476 version);
Vakul Garg94524d82018-08-29 15:26:55 +05301477
Boris Pismenny4799ac82018-07-13 14:33:43 +03001478 return err;
Vakul Garg94524d82018-08-29 15:26:55 +05301479 }
Dave Watson130b3922019-01-30 21:58:31 +00001480
1481 rxm->full_len -= padding_length(ctx, tls_ctx, skb);
1482
Dave Watsonfedf2012019-01-30 21:58:24 +00001483 rxm->offset += tls_ctx->rx.prepend_size;
1484 rxm->full_len -= tls_ctx->rx.overhead_size;
Dave Watson130b3922019-01-30 21:58:31 +00001485 tls_advance_record_sn(sk, &tls_ctx->rx, version);
Dave Watsonfedf2012019-01-30 21:58:24 +00001486 ctx->decrypted = true;
1487 ctx->saved_data_ready(sk);
Boris Pismenny4799ac82018-07-13 14:33:43 +03001488 } else {
1489 *zc = false;
1490 }
Boris Pismennydafb67f2018-07-13 14:33:40 +03001491
Boris Pismennydafb67f2018-07-13 14:33:40 +03001492 return err;
1493}
1494
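/* Decrypt one record into a caller-supplied scatterlist. This helper never
 * attempts zero-copy placement into user memory.
 */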
1495int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1496 struct scatterlist *sgout)
Dave Watsonc46234e2018-03-22 10:10:35 -07001497{
Vakul Garg0b243d02018-08-10 20:46:41 +05301498 bool zc = true;
1499 int chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001500
Vakul Garg692d7b52019-01-16 10:40:16 +00001501 return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
Dave Watsonc46234e2018-03-22 10:10:35 -07001502}
1503
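/* Consume 'len' bytes of the record at the head of the receive queue. Returns
 * false while part of the record is still unread; once the record is fully
 * consumed (or was already moved off the queue) it is released and the
 * strparser is unpaused so the next record can be parsed.
 */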
1504static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
1505 unsigned int len)
1506{
1507 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001508 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001509
Vakul Garg94524d82018-08-29 15:26:55 +05301510 if (skb) {
1511 struct strp_msg *rxm = strp_msg(skb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001512
Vakul Garg94524d82018-08-29 15:26:55 +05301513 if (len < rxm->full_len) {
1514 rxm->offset += len;
1515 rxm->full_len -= len;
1516 return false;
1517 }
1518 kfree_skb(skb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001519 }
1520
1521 /* Finished with message */
1522 ctx->recv_pkt = NULL;
Doron Roberts-Kedes7170e602018-06-06 09:33:28 -07001523 __strp_unpause(&ctx->strp);
Dave Watsonc46234e2018-03-22 10:10:35 -07001524
1525 return true;
1526}
1527
Vakul Garg692d7b52019-01-16 10:40:16 +00001528/* This function traverses the rx_list in the tls receive context and copies
1529 * the decrypted data records into the buffer provided by the caller when zero
1530 * copy is not in use. Further, records are removed from the rx_list if this
1531 * is not a peek case and the record has been consumed completely.
1532 */
1533static int process_rx_list(struct tls_sw_context_rx *ctx,
1534 struct msghdr *msg,
1535 size_t skip,
1536 size_t len,
1537 bool zc,
1538 bool is_peek)
1539{
1540 struct sk_buff *skb = skb_peek(&ctx->rx_list);
1541 ssize_t copied = 0;
1542
1543 while (skip && skb) {
1544 struct strp_msg *rxm = strp_msg(skb);
1545
1546 if (skip < rxm->full_len)
1547 break;
1548
1549 skip = skip - rxm->full_len;
1550 skb = skb_peek_next(skb, &ctx->rx_list);
1551 }
1552
1553 while (len && skb) {
1554 struct sk_buff *next_skb;
1555 struct strp_msg *rxm = strp_msg(skb);
1556 int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1557
1558 if (!zc || (rxm->full_len - skip) > len) {
1559 int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1560 msg, chunk);
1561 if (err < 0)
1562 return err;
1563 }
1564
1565 len = len - chunk;
1566 copied = copied + chunk;
1567
1568 /* Consume the data from the record in the non-peek case */
1569 if (!is_peek) {
1570 rxm->offset = rxm->offset + chunk;
1571 rxm->full_len = rxm->full_len - chunk;
1572
1573 /* Return if there is unconsumed data in the record */
1574 if (rxm->full_len - skip)
1575 break;
1576 }
1577
1578 /* The remaining skip-bytes must lie in 1st record in rx_list.
1579 * So from the 2nd record, 'skip' should be 0.
1580 */
1581 skip = 0;
1582
1583 if (msg)
1584 msg->msg_flags |= MSG_EOR;
1585
1586 next_skb = skb_peek_next(skb, &ctx->rx_list);
1587
1588 if (!is_peek) {
1589 skb_unlink(skb, &ctx->rx_list);
1590 kfree_skb(skb);
1591 }
1592
1593 skb = next_skb;
1594 }
1595
1596 return copied;
1597}
1598
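/* recvmsg() backend: drain records already decrypted on rx_list first, then
 * pull records from the strparser, decrypting either into internal buffers or
 * directly into the caller's iovec when the zero-copy conditions hold. The
 * record type is reported to userspace through a TLS_GET_RECORD_TYPE cmsg.
 */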
Dave Watsonc46234e2018-03-22 10:10:35 -07001599int tls_sw_recvmsg(struct sock *sk,
1600 struct msghdr *msg,
1601 size_t len,
1602 int nonblock,
1603 int flags,
1604 int *addr_len)
1605{
1606 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001607 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001608 struct sk_psock *psock;
Vakul Garg692d7b52019-01-16 10:40:16 +00001609 unsigned char control = 0;
1610 ssize_t decrypted = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001611 struct strp_msg *rxm;
1612 struct sk_buff *skb;
1613 ssize_t copied = 0;
1614 bool cmsg = false;
Daniel Borkmann06030db2018-06-15 03:07:46 +02001615 int target, err = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001616 long timeo;
David Howells00e23702018-10-22 13:07:28 +01001617 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
Vakul Garg692d7b52019-01-16 10:40:16 +00001618 bool is_peek = flags & MSG_PEEK;
Vakul Garg94524d82018-08-29 15:26:55 +05301619 int num_async = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001620
1621 flags |= nonblock;
1622
1623 if (unlikely(flags & MSG_ERRQUEUE))
1624 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1625
John Fastabendd3b18ad32018-10-13 02:46:01 +02001626 psock = sk_psock_get(sk);
Dave Watsonc46234e2018-03-22 10:10:35 -07001627 lock_sock(sk);
1628
Vakul Garg692d7b52019-01-16 10:40:16 +00001629 /* Process records already decrypted and queued on rx_list; never zero-copy */
1630 err = process_rx_list(ctx, msg, 0, len, false, is_peek);
1631 if (err < 0) {
1632 tls_err_abort(sk, err);
1633 goto end;
1634 } else {
1635 copied = err;
1636 }
1637
1638 len = len - copied;
1639 if (len) {
1640 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1641 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1642 } else {
1643 goto recv_end;
1644 }
1645
Dave Watsonc46234e2018-03-22 10:10:35 -07001646 do {
Vakul Garg692d7b52019-01-16 10:40:16 +00001647 bool retain_skb = false;
Vakul Garg94524d82018-08-29 15:26:55 +05301648 bool async = false;
Vakul Garg692d7b52019-01-16 10:40:16 +00001649 bool zc = false;
1650 int to_decrypt;
Dave Watsonc46234e2018-03-22 10:10:35 -07001651 int chunk = 0;
1652
John Fastabendd3b18ad32018-10-13 02:46:01 +02001653 skb = tls_wait_data(sk, psock, flags, timeo, &err);
1654 if (!skb) {
1655 if (psock) {
John Fastabend02c558b2018-10-16 11:08:04 -07001656 int ret = __tcp_bpf_recvmsg(sk, psock,
1657 msg, len, flags);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001658
1659 if (ret > 0) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001660 decrypted += ret;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001661 len -= ret;
1662 continue;
1663 }
1664 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001665 goto recv_end;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001666 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001667
1668 rxm = strp_msg(skb);
Vakul Garg94524d82018-08-29 15:26:55 +05301669
Dave Watsonfedf2012019-01-30 21:58:24 +00001670 to_decrypt = rxm->full_len - tls_ctx->rx.overhead_size;
1671
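		/* Decrypt directly into the caller's buffer only when the whole
		 * plaintext fits, the iterator maps user pages (not a kvec), we
		 * are not peeking, the record carries application data and the
		 * version is not TLS 1.3, whose padding and trailing content
		 * type are only known after decryption.
		 */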
1672 if (to_decrypt <= len && !is_kvec && !is_peek &&
Dave Watson130b3922019-01-30 21:58:31 +00001673 ctx->control == TLS_RECORD_TYPE_DATA &&
1674 tls_ctx->crypto_recv.info.version != TLS_1_3_VERSION)
Dave Watsonfedf2012019-01-30 21:58:24 +00001675 zc = true;
1676
1677 err = decrypt_skb_update(sk, skb, &msg->msg_iter,
1678 &chunk, &zc, ctx->async_capable);
1679 if (err < 0 && err != -EINPROGRESS) {
1680 tls_err_abort(sk, EBADMSG);
1681 goto recv_end;
1682 }
1683
1684 if (err == -EINPROGRESS) {
1685 async = true;
1686 num_async++;
1687 goto pick_next_record;
1688 }
1689
Dave Watsonc46234e2018-03-22 10:10:35 -07001690 if (!cmsg) {
1691 int cerr;
1692
1693 cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1694 sizeof(ctx->control), &ctx->control);
1695 cmsg = true;
1696 control = ctx->control;
1697 if (ctx->control != TLS_RECORD_TYPE_DATA) {
1698 if (cerr || msg->msg_flags & MSG_CTRUNC) {
1699 err = -EIO;
1700 goto recv_end;
1701 }
1702 }
1703 } else if (control != ctx->control) {
1704 goto recv_end;
1705 }
1706
Dave Watsonfedf2012019-01-30 21:58:24 +00001707 if (!zc) {
1708 if (rxm->full_len > len) {
1709 retain_skb = true;
1710 chunk = len;
1711 } else {
1712 chunk = rxm->full_len;
1713 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001714
Dave Watsonfedf2012019-01-30 21:58:24 +00001715 err = skb_copy_datagram_msg(skb, rxm->offset,
1716 msg, chunk);
1717 if (err < 0)
1718 goto recv_end;
Dave Watsonc46234e2018-03-22 10:10:35 -07001719
Dave Watsonfedf2012019-01-30 21:58:24 +00001720 if (!is_peek) {
1721 rxm->offset = rxm->offset + chunk;
1722 rxm->full_len = rxm->full_len - chunk;
Vakul Garg692d7b52019-01-16 10:40:16 +00001723 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001724 }
1725
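		/* Account for what was consumed from this record and decide
		 * whether to keep the skb on rx_list (async decrypt pending,
		 * MSG_PEEK, or data left over) or release it and move on.
		 */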
Vakul Garg94524d82018-08-29 15:26:55 +05301726pick_next_record:
Vakul Garg692d7b52019-01-16 10:40:16 +00001727 if (chunk > len)
1728 chunk = len;
1729
1730 decrypted += chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001731 len -= chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001732
Vakul Garg692d7b52019-01-16 10:40:16 +00001733 /* Queue the skb if async decrypt is pending, we are peeking or data remains */
1734 if (async || is_peek || retain_skb) {
1735 skb_queue_tail(&ctx->rx_list, skb);
1736 skb = NULL;
1737 }
Vakul Garg94524d82018-08-29 15:26:55 +05301738
Vakul Garg692d7b52019-01-16 10:40:16 +00001739 if (tls_sw_advance_skb(sk, skb, chunk)) {
1740 /* Return full control message to
1741 * userspace before trying to parse
1742 * another message type
Daniel Borkmann50c6b582018-09-14 23:00:55 +02001743 */
Vakul Garg692d7b52019-01-16 10:40:16 +00001744 msg->msg_flags |= MSG_EOR;
1745 if (ctx->control != TLS_RECORD_TYPE_DATA)
1746 goto recv_end;
1747 } else {
Daniel Borkmann50c6b582018-09-14 23:00:55 +02001748 break;
Dave Watsonc46234e2018-03-22 10:10:35 -07001749 }
Vakul Garg94524d82018-08-29 15:26:55 +05301750
Daniel Borkmann06030db2018-06-15 03:07:46 +02001751 /* If we have a new message from strparser, continue now. */
Vakul Garg692d7b52019-01-16 10:40:16 +00001752 if (decrypted >= target && !ctx->recv_pkt)
Daniel Borkmann06030db2018-06-15 03:07:46 +02001753 break;
Dave Watsonc46234e2018-03-22 10:10:35 -07001754 } while (len);
1755
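	/* recv_end: wait for any in-flight async decryptions, then reconcile
	 * rx_list with what was handed to the caller: peek and kvec reads are
	 * copied here, while the zero-copy path only needs its bookkeeping
	 * updated.
	 */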
1756recv_end:
Vakul Garg94524d82018-08-29 15:26:55 +05301757 if (num_async) {
1758 /* Wait for all previously submitted records to be decrypted */
1759 smp_store_mb(ctx->async_notify, true);
1760 if (atomic_read(&ctx->decrypt_pending)) {
1761 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1762 if (err) {
1763 /* one of the async decrypts failed */
1764 tls_err_abort(sk, err);
1765 copied = 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001766 decrypted = 0;
1767 goto end;
Vakul Garg94524d82018-08-29 15:26:55 +05301768 }
1769 } else {
1770 reinit_completion(&ctx->async_wait.completion);
1771 }
1772 WRITE_ONCE(ctx->async_notify, false);
Vakul Garg692d7b52019-01-16 10:40:16 +00001773
1774 /* Drain records from the rx_list & copy if required */
1775 if (is_peek || is_kvec)
1776 err = process_rx_list(ctx, msg, copied,
1777 decrypted, false, is_peek);
1778 else
1779 err = process_rx_list(ctx, msg, 0,
1780 decrypted, true, is_peek);
1781 if (err < 0) {
1782 tls_err_abort(sk, err);
1783 copied = 0;
1784 goto end;
1785 }
1786
1787 WARN_ON(decrypted != err);
Vakul Garg94524d82018-08-29 15:26:55 +05301788 }
1789
Vakul Garg692d7b52019-01-16 10:40:16 +00001790 copied += decrypted;
1791
1792end:
Dave Watsonc46234e2018-03-22 10:10:35 -07001793 release_sock(sk);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001794 if (psock)
1795 sk_psock_put(sk, psock);
Dave Watsonc46234e2018-03-22 10:10:35 -07001796 return copied ? : err;
1797}
1798
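/* splice(2) backend: decrypt the record at the head of the receive queue and
 * splice its plaintext pages into the pipe. Control records are rejected since
 * splice has no way to report the record type.
 */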
1799ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
1800 struct pipe_inode_info *pipe,
1801 size_t len, unsigned int flags)
1802{
1803 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001804 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001805 struct strp_msg *rxm = NULL;
1806 struct sock *sk = sock->sk;
1807 struct sk_buff *skb;
1808 ssize_t copied = 0;
1809 int err = 0;
1810 long timeo;
1811 int chunk;
Vakul Garg0b243d02018-08-10 20:46:41 +05301812 bool zc = false;
Dave Watsonc46234e2018-03-22 10:10:35 -07001813
1814 lock_sock(sk);
1815
1816 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1817
John Fastabendd3b18ad32018-10-13 02:46:01 +02001818 skb = tls_wait_data(sk, NULL, flags, timeo, &err);
Dave Watsonc46234e2018-03-22 10:10:35 -07001819 if (!skb)
1820 goto splice_read_end;
1821
Dave Watsonc46234e2018-03-22 10:10:35 -07001822 if (!ctx->decrypted) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001823 err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
Dave Watsonc46234e2018-03-22 10:10:35 -07001824
Dave Watsonfedf2012019-01-30 21:58:24 +00001825 /* splice does not support reading control messages */
1826 if (ctx->control != TLS_RECORD_TYPE_DATA) {
1827 err = -ENOTSUPP;
1828 goto splice_read_end;
1829 }
1830
Dave Watsonc46234e2018-03-22 10:10:35 -07001831 if (err < 0) {
1832 tls_err_abort(sk, EBADMSG);
1833 goto splice_read_end;
1834 }
1835 ctx->decrypted = true;
1836 }
1837 rxm = strp_msg(skb);
1838
1839 chunk = min_t(unsigned int, rxm->full_len, len);
1840 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
1841 if (copied < 0)
1842 goto splice_read_end;
1843
1844 if (likely(!(flags & MSG_PEEK)))
1845 tls_sw_advance_skb(sk, skb, copied);
1846
1847splice_read_end:
1848 release_sock(sk);
1849 return copied ? : err;
1850}
1851
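/* Poll helper: the socket is readable when either a parsed TLS record or
 * BPF-redirected ingress data is queued.
 */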
John Fastabend924ad652018-10-13 02:46:00 +02001852bool tls_sw_stream_read(const struct sock *sk)
Dave Watsonc46234e2018-03-22 10:10:35 -07001853{
Dave Watsonc46234e2018-03-22 10:10:35 -07001854 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001855 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001856 bool ingress_empty = true;
1857 struct sk_psock *psock;
Dave Watsonc46234e2018-03-22 10:10:35 -07001858
John Fastabendd3b18ad32018-10-13 02:46:01 +02001859 rcu_read_lock();
1860 psock = sk_psock(sk);
1861 if (psock)
1862 ingress_empty = list_empty(&psock->ingress_msg);
1863 rcu_read_unlock();
Dave Watsonc46234e2018-03-22 10:10:35 -07001864
John Fastabendd3b18ad32018-10-13 02:46:01 +02001865 return !ingress_empty || ctx->recv_pkt;
Dave Watsonc46234e2018-03-22 10:10:35 -07001866}
1867
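/* strparser parse callback: read the 5-byte record header and return the full
 * record length, 0 if more data is needed, or a negative error for malformed
 * records.
 */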
1868static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
1869{
1870 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001871 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Kees Cook3463e512018-06-25 16:55:05 -07001872 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
Dave Watsonc46234e2018-03-22 10:10:35 -07001873 struct strp_msg *rxm = strp_msg(skb);
1874 size_t cipher_overhead;
1875 size_t data_len = 0;
1876 int ret;
1877
1878 /* Verify that we have a full TLS header, or wait for more data */
1879 if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
1880 return 0;
1881
Kees Cook3463e512018-06-25 16:55:05 -07001882 /* Sanity-check size of on-stack buffer. */
1883 if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
1884 ret = -EINVAL;
1885 goto read_failure;
1886 }
1887
Dave Watsonc46234e2018-03-22 10:10:35 -07001888 /* Linearize header to local buffer */
1889 ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);
1890
1891 if (ret < 0)
1892 goto read_failure;
1893
1894 ctx->control = header[0];
1895
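	/* The record length is a 16-bit big-endian field that follows the
	 * one-byte content type and the two-byte protocol version.
	 */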
1896 data_len = ((header[4] & 0xFF) | (header[3] << 8));
1897
Dave Watson130b3922019-01-30 21:58:31 +00001898 cipher_overhead = tls_ctx->rx.tag_size;
1899 if (tls_ctx->crypto_recv.info.version != TLS_1_3_VERSION)
1900 cipher_overhead += tls_ctx->rx.iv_size;
Dave Watsonc46234e2018-03-22 10:10:35 -07001901
Dave Watson130b3922019-01-30 21:58:31 +00001902 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
1903 tls_ctx->rx.tail_size) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001904 ret = -EMSGSIZE;
1905 goto read_failure;
1906 }
1907 if (data_len < cipher_overhead) {
1908 ret = -EBADMSG;
1909 goto read_failure;
1910 }
1911
Dave Watson130b3922019-01-30 21:58:31 +00001912 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
1913 if (header[1] != TLS_1_2_VERSION_MINOR ||
1914 header[2] != TLS_1_2_VERSION_MAJOR) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001915 ret = -EINVAL;
1916 goto read_failure;
1917 }
Boris Pismenny4799ac82018-07-13 14:33:43 +03001918#ifdef CONFIG_TLS_DEVICE
1919 handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
1920 *(u64*)tls_ctx->rx.rec_seq);
1921#endif
Dave Watsonc46234e2018-03-22 10:10:35 -07001922 return data_len + TLS_HEADER_SIZE;
1923
1924read_failure:
1925 tls_err_abort(strp->sk, ret);
1926
1927 return ret;
1928}
1929
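/* strparser rcv callback: hold on to the complete record and pause the parser
 * until recvmsg()/splice() has consumed it.
 */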
1930static void tls_queue(struct strparser *strp, struct sk_buff *skb)
1931{
1932 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001933 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001934
1935 ctx->decrypted = false;
1936
1937 ctx->recv_pkt = skb;
1938 strp_pause(strp);
1939
Vakul Gargad13acc2018-07-30 16:08:33 +05301940 ctx->saved_data_ready(strp->sk);
Dave Watsonc46234e2018-03-22 10:10:35 -07001941}
1942
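/* sk_data_ready hook: feed newly arrived TCP data to the strparser and wake
 * the original callback when BPF ingress messages are queued.
 */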
1943static void tls_data_ready(struct sock *sk)
1944{
1945 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001946 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001947 struct sk_psock *psock;
Dave Watsonc46234e2018-03-22 10:10:35 -07001948
1949 strp_data_ready(&ctx->strp);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001950
1951 psock = sk_psock_get(sk);
1952 if (psock && !list_empty(&psock->ingress_msg)) {
1953 ctx->saved_data_ready(sk);
1954 sk_psock_put(sk, psock);
1955 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001956}
1957
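/* Tear down the TX state: wait for pending async encryptions, flush the
 * transmit worker, send what can still be sent and free any partially sent or
 * unsent records along with the crypto context.
 */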
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001958void tls_sw_free_resources_tx(struct sock *sk)
Dave Watson3c4d7552017-06-14 11:37:39 -07001959{
1960 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001961 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +05301962 struct tls_rec *rec, *tmp;
1963
1964 /* Wait for any pending async encryptions to complete */
1965 smp_store_mb(ctx->async_notify, true);
1966 if (atomic_read(&ctx->encrypt_pending))
1967 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1968
Dave Watson10231212019-01-27 00:59:03 +00001969 release_sock(sk);
Vakul Garga42055e2018-09-21 09:46:13 +05301970 cancel_delayed_work_sync(&ctx->tx_work.work);
Dave Watson10231212019-01-27 00:59:03 +00001971 lock_sock(sk);
Vakul Garga42055e2018-09-21 09:46:13 +05301972
1973 /* Tx whatever records we can transmit and abandon the rest */
1974 tls_tx_records(sk, -1);
1975
Vakul Garg9932a292018-09-24 15:35:56 +05301976 /* Free up unsent records in tx_list. First, free
Vakul Garga42055e2018-09-21 09:46:13 +05301977 * the partially sent record, if any, at the head of tx_list.
1978 */
1979 if (tls_ctx->partially_sent_record) {
1980 struct scatterlist *sg = tls_ctx->partially_sent_record;
1981
1982 while (1) {
1983 put_page(sg_page(sg));
1984 sk_mem_uncharge(sk, sg->length);
1985
1986 if (sg_is_last(sg))
1987 break;
1988 sg++;
1989 }
1990
1991 tls_ctx->partially_sent_record = NULL;
1992
Vakul Garg9932a292018-09-24 15:35:56 +05301993 rec = list_first_entry(&ctx->tx_list,
Vakul Garga42055e2018-09-21 09:46:13 +05301994 struct tls_rec, list);
1995 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001996 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +05301997 kfree(rec);
1998 }
1999
Vakul Garg9932a292018-09-24 15:35:56 +05302000 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
Vakul Garga42055e2018-09-21 09:46:13 +05302001 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02002002 sk_msg_free(sk, &rec->msg_encrypted);
2003 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +05302004 kfree(rec);
2005 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002006
Vakul Garg201876b2018-07-24 16:54:27 +05302007 crypto_free_aead(ctx->aead_send);
Vakul Gargc7749732018-09-25 20:21:51 +05302008 tls_free_open_rec(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002009
2010 kfree(ctx);
2011}
2012
Boris Pismenny39f56e12018-07-13 14:33:41 +03002013void tls_sw_release_resources_rx(struct sock *sk)
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002014{
2015 struct tls_context *tls_ctx = tls_get_ctx(sk);
2016 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2017
Dave Watsonc46234e2018-03-22 10:10:35 -07002018 if (ctx->aead_recv) {
Vakul Garg201876b2018-07-24 16:54:27 +05302019 kfree_skb(ctx->recv_pkt);
2020 ctx->recv_pkt = NULL;
Vakul Garg692d7b52019-01-16 10:40:16 +00002021 skb_queue_purge(&ctx->rx_list);
Dave Watsonc46234e2018-03-22 10:10:35 -07002022 crypto_free_aead(ctx->aead_recv);
2023 strp_stop(&ctx->strp);
2024 write_lock_bh(&sk->sk_callback_lock);
2025 sk->sk_data_ready = ctx->saved_data_ready;
2026 write_unlock_bh(&sk->sk_callback_lock);
2027 release_sock(sk);
2028 strp_done(&ctx->strp);
2029 lock_sock(sk);
2030 }
Boris Pismenny39f56e12018-07-13 14:33:41 +03002031}
2032
2033void tls_sw_free_resources_rx(struct sock *sk)
2034{
2035 struct tls_context *tls_ctx = tls_get_ctx(sk);
2036 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2037
2038 tls_sw_release_resources_rx(sk);
Dave Watson3c4d7552017-06-14 11:37:39 -07002039
Dave Watson3c4d7552017-06-14 11:37:39 -07002040 kfree(ctx);
2041}
2042
Vakul Garg9932a292018-09-24 15:35:56 +05302043/* The work handler to transmit the encrypted records in tx_list */
Vakul Garga42055e2018-09-21 09:46:13 +05302044static void tx_work_handler(struct work_struct *work)
2045{
2046 struct delayed_work *delayed_work = to_delayed_work(work);
2047 struct tx_work *tx_work = container_of(delayed_work,
2048 struct tx_work, work);
2049 struct sock *sk = tx_work->sk;
2050 struct tls_context *tls_ctx = tls_get_ctx(sk);
2051 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2052
2053 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2054 return;
2055
2056 lock_sock(sk);
2057 tls_tx_records(sk, -1);
2058 release_sock(sk);
2059}
2060
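/* Set up software TLS for one direction of the socket: allocate the
 * per-direction context, derive record sizes from the negotiated cipher and
 * protocol version, program the AEAD transform and, for RX, attach the
 * strparser to the socket.
 *
 * Roughly, userspace reaches this path with something like the following
 * (a sketch; error handling omitted, key material comes from the handshake):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));
 */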
Dave Watsonc46234e2018-03-22 10:10:35 -07002061int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
Dave Watson3c4d7552017-06-14 11:37:39 -07002062{
Dave Watson3c4d7552017-06-14 11:37:39 -07002063 struct tls_crypto_info *crypto_info;
2064 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002065 struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002066 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2067 struct tls_sw_context_rx *sw_ctx_rx = NULL;
Dave Watsonc46234e2018-03-22 10:10:35 -07002068 struct cipher_context *cctx;
2069 struct crypto_aead **aead;
2070 struct strp_callbacks cb;
Dave Watson3c4d7552017-06-14 11:37:39 -07002071 u16 nonce_size, tag_size, iv_size, rec_seq_size;
Vakul Garg692d7b52019-01-16 10:40:16 +00002072 struct crypto_tfm *tfm;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002073 char *iv, *rec_seq, *key, *salt;
2074 size_t keysize;
Dave Watson3c4d7552017-06-14 11:37:39 -07002075 int rc = 0;
2076
2077 if (!ctx) {
2078 rc = -EINVAL;
2079 goto out;
2080 }
2081
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002082 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03002083 if (!ctx->priv_ctx_tx) {
2084 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2085 if (!sw_ctx_tx) {
2086 rc = -ENOMEM;
2087 goto out;
2088 }
2089 ctx->priv_ctx_tx = sw_ctx_tx;
2090 } else {
2091 sw_ctx_tx =
2092 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
Dave Watsonc46234e2018-03-22 10:10:35 -07002093 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002094 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03002095 if (!ctx->priv_ctx_rx) {
2096 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2097 if (!sw_ctx_rx) {
2098 rc = -ENOMEM;
2099 goto out;
2100 }
2101 ctx->priv_ctx_rx = sw_ctx_rx;
2102 } else {
2103 sw_ctx_rx =
2104 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002105 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002106 }
2107
Dave Watsonc46234e2018-03-22 10:10:35 -07002108 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03002109 crypto_init_wait(&sw_ctx_tx->async_wait);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02002110 crypto_info = &ctx->crypto_send.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07002111 cctx = &ctx->tx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002112 aead = &sw_ctx_tx->aead_send;
Vakul Garg9932a292018-09-24 15:35:56 +05302113 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
Vakul Garga42055e2018-09-21 09:46:13 +05302114 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2115 sw_ctx_tx->tx_work.sk = sk;
Dave Watsonc46234e2018-03-22 10:10:35 -07002116 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03002117 crypto_init_wait(&sw_ctx_rx->async_wait);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02002118 crypto_info = &ctx->crypto_recv.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07002119 cctx = &ctx->rx;
Vakul Garg692d7b52019-01-16 10:40:16 +00002120 skb_queue_head_init(&sw_ctx_rx->rx_list);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002121 aead = &sw_ctx_rx->aead_recv;
Dave Watsonc46234e2018-03-22 10:10:35 -07002122 }
2123
Dave Watson3c4d7552017-06-14 11:37:39 -07002124 switch (crypto_info->cipher_type) {
2125 case TLS_CIPHER_AES_GCM_128: {
2126 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2127 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2128 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2129 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
2130 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2131 rec_seq =
2132 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
2133 gcm_128_info =
2134 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002135 keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2136 key = gcm_128_info->key;
2137 salt = gcm_128_info->salt;
2138 break;
2139 }
2140 case TLS_CIPHER_AES_GCM_256: {
2141 nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2142 tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2143 iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2144 iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
2145 rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2146 rec_seq =
2147 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
2148 gcm_256_info =
2149 (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
2150 keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2151 key = gcm_256_info->key;
2152 salt = gcm_256_info->salt;
Dave Watson3c4d7552017-06-14 11:37:39 -07002153 break;
2154 }
2155 default:
2156 rc = -EINVAL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002157 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07002158 }
2159
Kees Cookb16520f2018-04-10 17:52:34 -07002160 /* Sanity-check the IV size for stack allocations. */
Kees Cook3463e512018-06-25 16:55:05 -07002161 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
Kees Cookb16520f2018-04-10 17:52:34 -07002162 rc = -EINVAL;
2163 goto free_priv;
2164 }
2165
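	/* TLS 1.3 carries no per-record explicit nonce, authenticates only the
	 * 5-byte record header as AAD and appends a one-byte inner content
	 * type accounted for by tail_size; TLS 1.2 keeps the explicit nonce
	 * and the larger AAD.
	 */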
Dave Watson130b3922019-01-30 21:58:31 +00002166 if (crypto_info->version == TLS_1_3_VERSION) {
2167 nonce_size = 0;
2168 cctx->aad_size = TLS_HEADER_SIZE;
2169 cctx->tail_size = 1;
2170 } else {
2171 cctx->aad_size = TLS_AAD_SPACE_SIZE;
2172 cctx->tail_size = 0;
2173 }
2174
Dave Watsonc46234e2018-03-22 10:10:35 -07002175 cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
2176 cctx->tag_size = tag_size;
Dave Watson130b3922019-01-30 21:58:31 +00002177 cctx->overhead_size = cctx->prepend_size + cctx->tag_size +
2178 cctx->tail_size;
Dave Watsonc46234e2018-03-22 10:10:35 -07002179 cctx->iv_size = iv_size;
2180 cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
2181 GFP_KERNEL);
2182 if (!cctx->iv) {
Dave Watson3c4d7552017-06-14 11:37:39 -07002183 rc = -ENOMEM;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002184 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07002185 }
Dave Watsonfb99bce2019-01-30 21:58:05 +00002186 /* Note: 128 & 256 bit salt are the same size */
2187 memcpy(cctx->iv, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
Dave Watsonc46234e2018-03-22 10:10:35 -07002188 memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
2189 cctx->rec_seq_size = rec_seq_size;
zhong jiang969d5092018-08-01 00:50:24 +08002190 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
Dave Watsonc46234e2018-03-22 10:10:35 -07002191 if (!cctx->rec_seq) {
Dave Watson3c4d7552017-06-14 11:37:39 -07002192 rc = -ENOMEM;
2193 goto free_iv;
2194 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002195
Dave Watsonc46234e2018-03-22 10:10:35 -07002196 if (!*aead) {
2197 *aead = crypto_alloc_aead("gcm(aes)", 0, 0);
2198 if (IS_ERR(*aead)) {
2199 rc = PTR_ERR(*aead);
2200 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002201 goto free_rec_seq;
2202 }
2203 }
2204
2205 ctx->push_pending_record = tls_sw_push_pending_record;
2206
Dave Watsonfb99bce2019-01-30 21:58:05 +00002207 rc = crypto_aead_setkey(*aead, key, keysize);
2208
Dave Watson3c4d7552017-06-14 11:37:39 -07002209 if (rc)
2210 goto free_aead;
2211
Dave Watsonc46234e2018-03-22 10:10:35 -07002212 rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
2213 if (rc)
2214 goto free_aead;
2215
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002216 if (sw_ctx_rx) {
Vakul Garg692d7b52019-01-16 10:40:16 +00002217 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2218 sw_ctx_rx->async_capable =
2219 tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
2220
Dave Watsonc46234e2018-03-22 10:10:35 -07002221 /* Set up strparser */
2222 memset(&cb, 0, sizeof(cb));
2223 cb.rcv_msg = tls_queue;
2224 cb.parse_msg = tls_read_size;
2225
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002226 strp_init(&sw_ctx_rx->strp, sk, &cb);
Dave Watsonc46234e2018-03-22 10:10:35 -07002227
2228 write_lock_bh(&sk->sk_callback_lock);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002229 sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
Dave Watsonc46234e2018-03-22 10:10:35 -07002230 sk->sk_data_ready = tls_data_ready;
2231 write_unlock_bh(&sk->sk_callback_lock);
2232
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002233 strp_check_rcv(&sw_ctx_rx->strp);
Dave Watsonc46234e2018-03-22 10:10:35 -07002234 }
2235
2236 goto out;
Dave Watson3c4d7552017-06-14 11:37:39 -07002237
2238free_aead:
Dave Watsonc46234e2018-03-22 10:10:35 -07002239 crypto_free_aead(*aead);
2240 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002241free_rec_seq:
Dave Watsonc46234e2018-03-22 10:10:35 -07002242 kfree(cctx->rec_seq);
2243 cctx->rec_seq = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002244free_iv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002245 kfree(cctx->iv);
2246 cctx->iv = NULL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002247free_priv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002248 if (tx) {
2249 kfree(ctx->priv_ctx_tx);
2250 ctx->priv_ctx_tx = NULL;
2251 } else {
2252 kfree(ctx->priv_ctx_rx);
2253 ctx->priv_ctx_rx = NULL;
2254 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002255out:
2256 return rc;
2257}