/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}

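/* Usage sketch: the RX path sizes its input scatterlist this way before
 * decrypting a record; decrypt_internal(), later in this file, does:
 *
 *	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
 *			 rxm->full_len - prot->prepend_size);
 *	if (n_sgin < 1)
 *		return -EBADMSG;
 */
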
static int padding_length(struct tls_sw_context_rx *ctx,
			  struct tls_prot_info *prot, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		char content_type = 0;
		int err;
		int back = 17;

		while (content_type == 0) {
			if (back > rxm->full_len - prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb,
					    rxm->offset + rxm->full_len - back,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			back++;
		}
		ctx->control = content_type;
	}
	return sub;
}

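/* Worked example (TLS 1.3, AES-GCM-128, 16-byte tag): after in-place
 * decryption, the inner plaintext of a record carrying "hi" as
 * application data with two bytes of padding ends with
 *
 *	... 'h' 'i' 0x17 0x00 0x00 | <16-byte auth tag>
 *
 * The scan above starts 17 bytes from the end (back = 16 + 1), walks
 * backwards over the zeros (sub = 2), stops at the 0x17 content type,
 * records it in ctx->control, and returns 2 so the caller can trim the
 * padding from the record length.
 */
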
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct scatterlist *sgout = aead_req->dst;
	struct scatterlist *sgin = aead_req->src;
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	prot = &tls_ctx->prot_info;

	/* Propagate the error if there was one */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(skb->sk),
				      LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(skb->sk, err);
	} else {
		struct strp_msg *rxm = strp_msg(skb);
		int pad;

		pad = padding_length(ctx, prot, skb);
		if (pad < 0) {
			ctx->async_wait.err = pad;
			tls_err_abort(skb->sk, pad);
		} else {
			rxm->full_len -= pad;
			rxm->offset += prot->prepend_size;
			rxm->full_len -= prot->overhead_size;
		}
	}

	/* After using skb->sk to propagate sk through the crypto async
	 * callback we need to NULL it again.
	 */
	skb->sk = NULL;

	/* Free the destination pages if the skb was not decrypted in place */
	if (sgout != sgin) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	spin_lock_bh(&ctx->decrypt_compl_lock);
	pending = atomic_dec_return(&ctx->decrypt_pending);

	if (!pending && ctx->async_notify)
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->decrypt_compl_lock);
}

static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (async) {
		/* Use skb->sk to push sk through to the crypto async callback
		 * handler. This allows propagating errors up to the socket
		 * if needed. It _must_ be cleared in the async handler
		 * before consume_skb is called. We _know_ skb->sk is NULL
		 * because it is a clone from strparser.
		 */
		skb->sk = sk;
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, skb);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (async)
			return ret;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}

	if (async)
		atomic_dec(&ctx->decrypt_pending);

	return ret;
}

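/* Async contract, a sketch: with async == true, -EINPROGRESS means the
 * request was queued and tls_decrypt_done() will run later. A caller
 * (e.g. decrypt_skb_update(), later in this file) must then wait on
 * ctx->decrypt_pending / ctx->async_wait instead of using the result
 * right away:
 *
 *	ret = tls_do_decryption(sk, skb, sgin, sgout, iv, data_len,
 *				aead_req, true);
 *	if (ret == -EINPROGRESS)
 *		return ret;	(record is finalized in the callback)
 */
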
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from the encrypted sg
	 * at the end of the plaintext sg. It is guaranteed that msg_en
	 * has enough room (ensured by the caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * the same offset for both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}

int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	return rc;
}

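/* Scheduling sketch: besides direct calls from the sendmsg/sendpage
 * paths, tls_tx_records() also runs from the delayed work scheduled by
 * tls_encrypt_done() below (the actual handler, tx_work_handler(), is
 * defined later in this file). Roughly:
 *
 *	lock_sock(sk);
 *	tls_tx_records(sk, -1);		(flags == -1: reuse rec->tx_flags)
 *	release_sock(sk);
 */
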
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct sock *sk = req->data;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);
	msg_en = &rec->msg_encrypted;

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if an error was previously set on the socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If an error is already set on the socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If the received record is at the head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	spin_lock_bh(&ctx->encrypt_compl_lock);
	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && ctx->async_notify)
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->encrypt_compl_lock);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}

static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM-based ciphers, the first byte of the IV is a constant */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, sk);

	/* Add the record to tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from the context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}

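/* Nonce layout sketch (AES-GCM-128), an illustration of the code above;
 * the TLS-1.3-only XOR is an assumption based on the RFC 8446 nonce
 * construction that xor_iv_with_seq() implements:
 *
 *	iv_data[0..3]  = salt            (tls_ctx->tx.iv holds salt || IV)
 *	iv_data[4..11] = per-record IV
 *	TLS 1.3:  iv_data[4..11] ^= rec_seq[0..7]
 *
 * For AES-CCM-128 the salt/IV copy shifts up by one byte (iv_offset == 1)
 * because iv_data[0] must carry the constant B0 byte.
 */
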
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}

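/* Worked example (hypothetical sizes): a plaintext msg of three 4 KB
 * entries with apply_bytes == 6 KB splits at the middle entry. The
 * original record keeps 4 KB + 2 KB (the shared page gets an extra
 * reference via get_page() and its entry is truncated to 'apply'); the
 * new record starts with the remaining 2 KB of that page (the 'tmp'
 * entry above) followed by the untouched trailing 4 KB entry.
 */
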
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}

static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, uninitialized_var(orig_end);
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if the tls_split_open_record() above
		 * allocates a single large encryption buffer instead of two
		 * smaller ones. In this case adjust pointers and continue
		 * without a split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add the content type to the end of the message.
		 * No padding is added.
		 */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, prot->rec_seq_size,
		     record_type, prot->version);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type, prot->version);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}

static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}

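/* Verdict handling in brief: __SK_PASS encrypts and queues the record
 * via tls_push_record(); __SK_REDIRECT hands the plaintext sk_msg to
 * another socket through tcp_bpf_sendmsg_redir() (dropping the socket
 * lock around the call); __SK_DROP frees the data and reports -EACCES
 * to the sender. The loop above continues while an open record with
 * apply_bytes remains.
 */
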
static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	ssize_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}

int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;
	int pending;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due to the
			 * max sg elements limit.
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due to the
			 * max sg elements limit.
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records are defined only if data was successfully
		 * copied, otherwise we would trim the sg but not reset the
		 * open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to complete */
		spin_lock_bh(&ctx->encrypt_compl_lock);
		ctx->async_notify = true;

		pending = atomic_read(&ctx->encrypt_pending);
		spin_unlock_bh(&ctx->encrypt_compl_lock);
		if (pending)
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		/* There can be no concurrent accesses, since we have no
		 * pending encrypt operations
		 */
		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return copied > 0 ? copied : ret;
}

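/* How this is reached from userspace, a minimal sketch (assumes an
 * already-handshaked TCP socket and TLS 1.2 AES-GCM-128 key material;
 * see Documentation/networking/tls.rst):
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	(fill ci.iv / ci.key / ci.salt / ci.rec_seq from the handshake)
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	send(fd, buf, len, 0);	(ends up in tls_sw_sendmsg() above)
 */
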
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
			      int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct sk_msg *msg_pl;
	struct tls_rec *rec;
	int num_async = 0;
	ssize_t copied = 0;
	bool full_record;
	int record_room;
	int ret = 0;
	bool eor;

	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		msg_pl = &rec->msg_plaintext;

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + copy + prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due to the
			 * max sg elements limit.
			 */
			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}

		sk_msg_page_add(msg_pl, page, copy, offset);
		sk_mem_charge(sk, copy);

		offset += copy;
		size -= copy;
		copied += copy;

		tls_ctx->pending_open_record_frags = true;
		if (full_record || eor || sk_msg_full(msg_pl)) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied, flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto sendpage_end;
				}
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}

		if (ctx->open_rec)
			goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	ret = sk_stream_error(sk, flags, ret);
	return copied > 0 ? copied : ret;
}

int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags)
{
	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
		      MSG_NO_SHARED_FRAGS))
		return -EOPNOTSUPP;

	return tls_sw_do_sendpage(sk, page, offset, size, flags);
}

int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}

John Fastabendd3b18ad32018-10-13 02:46:01 +02001283static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
1284 int flags, long timeo, int *err)
Dave Watsonc46234e2018-03-22 10:10:35 -07001285{
1286 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001287 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001288 struct sk_buff *skb;
1289 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1290
John Fastabendd3b18ad32018-10-13 02:46:01 +02001291 while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001292 if (sk->sk_err) {
1293 *err = sock_error(sk);
1294 return NULL;
1295 }
1296
Doron Roberts-Kedesfcf47932018-07-18 16:22:27 -07001297 if (sk->sk_shutdown & RCV_SHUTDOWN)
1298 return NULL;
1299
Dave Watsonc46234e2018-03-22 10:10:35 -07001300 if (sock_flag(sk, SOCK_DONE))
1301 return NULL;
1302
1303 if ((flags & MSG_DONTWAIT) || !timeo) {
1304 *err = -EAGAIN;
1305 return NULL;
1306 }
1307
1308 add_wait_queue(sk_sleep(sk), &wait);
1309 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001310 sk_wait_event(sk, &timeo,
1311 ctx->recv_pkt != skb ||
1312 !sk_psock_queue_empty(psock),
1313 &wait);
Dave Watsonc46234e2018-03-22 10:10:35 -07001314 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1315 remove_wait_queue(sk_sleep(sk), &wait);
1316
1317 /* Handle signals */
1318 if (signal_pending(current)) {
1319 *err = sock_intr_errno(timeo);
1320 return NULL;
1321 }
1322 }
1323
1324 return skb;
1325}
1326
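/* Pin the user pages backing @from and append them to the scatterlist
 * @to, advancing the iterator. On failure the iterator is reverted so
 * the caller can fall back to a copying receive path.
 */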
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001327static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
1328 int length, int *pages_used,
1329 unsigned int *size_used,
1330 struct scatterlist *to,
1331 int to_max_pages)
1332{
1333 int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1334 struct page *pages[MAX_SKB_FRAGS];
1335 unsigned int size = *size_used;
1336 ssize_t copied, use;
1337 size_t offset;
1338
1339 while (length > 0) {
1340 i = 0;
1341 maxpages = to_max_pages - num_elem;
1342 if (maxpages == 0) {
1343 rc = -EFAULT;
1344 goto out;
1345 }
1346 copied = iov_iter_get_pages(from, pages,
1347 length,
1348 maxpages, &offset);
1349 if (copied <= 0) {
1350 rc = -EFAULT;
1351 goto out;
1352 }
1353
1354 iov_iter_advance(from, copied);
1355
1356 length -= copied;
1357 size += copied;
1358 while (copied) {
1359 use = min_t(int, copied, PAGE_SIZE - offset);
1360
1361 sg_set_page(&to[num_elem],
1362 pages[i], use, offset);
1363 sg_unmark_end(&to[num_elem]);
1364 /* We do not uncharge memory from this API */
1365
1366 offset = 0;
1367 copied -= use;
1368
1369 i++;
1370 num_elem++;
1371 }
1372 }
1373 /* Mark the end in the last sg entry if newly added */
1374 if (num_elem > *pages_used)
1375 sg_mark_end(&to[num_elem - 1]);
1376out:
1377 if (rc)
1378 iov_iter_revert(from, size - *size_used);
1379 *size_used = size;
1380 *pages_used = num_elem;
1381
1382 return rc;
1383}
1384
Vakul Garg0b243d02018-08-10 20:46:41 +05301385/* This function decrypts the input skb into either out_iov, out_sg or
 1386 * the skb buffers themselves. The input parameter 'zc' indicates
 1387 * whether zero-copy mode should be tried. With zero-copy mode, either
 1388 * out_iov or out_sg must be non-NULL. If both out_iov and out_sg are
 1389 * NULL, the decryption happens inside the skb buffers themselves, i.e.
 1390 * zero-copy gets disabled and 'zc' is updated.
 1391 */
1392
1393static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1394 struct iov_iter *out_iov,
1395 struct scatterlist *out_sg,
Vakul Garg692d7b52019-01-16 10:40:16 +00001396 int *chunk, bool *zc, bool async)
Vakul Garg0b243d02018-08-10 20:46:41 +05301397{
1398 struct tls_context *tls_ctx = tls_get_ctx(sk);
1399 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001400 struct tls_prot_info *prot = &tls_ctx->prot_info;
Vakul Garg0b243d02018-08-10 20:46:41 +05301401 struct strp_msg *rxm = strp_msg(skb);
1402 int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
1403 struct aead_request *aead_req;
1404 struct sk_buff *unused;
1405 u8 *aad, *iv, *mem = NULL;
1406 struct scatterlist *sgin = NULL;
1407 struct scatterlist *sgout = NULL;
Vakul Garg4509de12019-02-14 07:11:35 +00001408 const int data_len = rxm->full_len - prot->overhead_size +
1409 prot->tail_size;
Vakul Gargf295b3a2019-03-20 02:03:36 +00001410 int iv_offset = 0;
Vakul Garg0b243d02018-08-10 20:46:41 +05301411
1412 if (*zc && (out_iov || out_sg)) {
1413 if (out_iov)
1414 n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
1415 else
1416 n_sgout = sg_nents(out_sg);
Vakul Garg4509de12019-02-14 07:11:35 +00001417 n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1418 rxm->full_len - prot->prepend_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301419 } else {
1420 n_sgout = 0;
1421 *zc = false;
Doron Roberts-Kedes0927f712018-08-28 16:33:57 -07001422 n_sgin = skb_cow_data(skb, 0, &unused);
Vakul Garg0b243d02018-08-10 20:46:41 +05301423 }
1424
Vakul Garg0b243d02018-08-10 20:46:41 +05301425 if (n_sgin < 1)
1426 return -EBADMSG;
1427
1428 /* Increment to accommodate AAD */
1429 n_sgin = n_sgin + 1;
1430
1431 nsg = n_sgin + n_sgout;
1432
1433 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1434 mem_size = aead_size + (nsg * sizeof(struct scatterlist));
Vakul Garg4509de12019-02-14 07:11:35 +00001435 mem_size = mem_size + prot->aad_size;
Vakul Garg0b243d02018-08-10 20:46:41 +05301436 mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
1437
1438 /* Allocate a single block of memory which contains
1439 * aead_req || sgin[] || sgout[] || aad || iv.
1440 * This order achieves correct alignment for aead_req, sgin, sgout.
1441 */
1442 mem = kmalloc(mem_size, sk->sk_allocation);
1443 if (!mem)
1444 return -ENOMEM;
1445
1446 /* Segment the allocated memory */
1447 aead_req = (struct aead_request *)mem;
1448 sgin = (struct scatterlist *)(mem + aead_size);
1449 sgout = sgin + n_sgin;
1450 aad = (u8 *)(sgout + n_sgout);
Vakul Garg4509de12019-02-14 07:11:35 +00001451 iv = aad + prot->aad_size;
Vakul Garg0b243d02018-08-10 20:46:41 +05301452
Vakul Gargf295b3a2019-03-20 02:03:36 +00001453	/* For CCM-based ciphers, the first byte of nonce+iv is always '2' */
1454 if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
1455 iv[0] = 2;
1456 iv_offset = 1;
1457 }
1458
Vakul Garg0b243d02018-08-10 20:46:41 +05301459 /* Prepare IV */
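	/* TLS 1.2 carries an explicit per-record nonce on the wire right
	 * after the record header; it is copied in behind the salt below.
	 * TLS 1.3 instead uses the full static IV, XORed with the record
	 * sequence number, and transmits no explicit nonce.
	 */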
1460 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
Vakul Gargf295b3a2019-03-20 02:03:36 +00001461 iv + iv_offset + prot->salt_size,
Vakul Garg4509de12019-02-14 07:11:35 +00001462 prot->iv_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301463 if (err < 0) {
1464 kfree(mem);
1465 return err;
1466 }
Vakul Garg4509de12019-02-14 07:11:35 +00001467 if (prot->version == TLS_1_3_VERSION)
Vakul Gargf295b3a2019-03-20 02:03:36 +00001468 memcpy(iv + iv_offset, tls_ctx->rx.iv,
1469 crypto_aead_ivsize(ctx->aead_recv));
Dave Watson130b3922019-01-30 21:58:31 +00001470 else
Vakul Gargf295b3a2019-03-20 02:03:36 +00001471 memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
Dave Watson130b3922019-01-30 21:58:31 +00001472
Vakul Garg4509de12019-02-14 07:11:35 +00001473 xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);
Vakul Garg0b243d02018-08-10 20:46:41 +05301474
1475 /* Prepare AAD */
Vakul Garg4509de12019-02-14 07:11:35 +00001476 tls_make_aad(aad, rxm->full_len - prot->overhead_size +
1477 prot->tail_size,
1478 tls_ctx->rx.rec_seq, prot->rec_seq_size,
1479 ctx->control, prot->version);
Vakul Garg0b243d02018-08-10 20:46:41 +05301480
1481 /* Prepare sgin */
1482 sg_init_table(sgin, n_sgin);
Vakul Garg4509de12019-02-14 07:11:35 +00001483 sg_set_buf(&sgin[0], aad, prot->aad_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301484 err = skb_to_sgvec(skb, &sgin[1],
Vakul Garg4509de12019-02-14 07:11:35 +00001485 rxm->offset + prot->prepend_size,
1486 rxm->full_len - prot->prepend_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301487 if (err < 0) {
1488 kfree(mem);
1489 return err;
1490 }
1491
1492 if (n_sgout) {
1493 if (out_iov) {
1494 sg_init_table(sgout, n_sgout);
Vakul Garg4509de12019-02-14 07:11:35 +00001495 sg_set_buf(&sgout[0], aad, prot->aad_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301496
1497 *chunk = 0;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001498 err = tls_setup_from_iter(sk, out_iov, data_len,
1499 &pages, chunk, &sgout[1],
1500 (n_sgout - 1));
Vakul Garg0b243d02018-08-10 20:46:41 +05301501 if (err < 0)
1502 goto fallback_to_reg_recv;
1503 } else if (out_sg) {
1504 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1505 } else {
1506 goto fallback_to_reg_recv;
1507 }
1508 } else {
1509fallback_to_reg_recv:
1510 sgout = sgin;
1511 pages = 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001512 *chunk = data_len;
Vakul Garg0b243d02018-08-10 20:46:41 +05301513 *zc = false;
1514 }
1515
1516 /* Prepare and submit AEAD request */
Vakul Garg94524d82018-08-29 15:26:55 +05301517 err = tls_do_decryption(sk, skb, sgin, sgout, iv,
Vakul Garg692d7b52019-01-16 10:40:16 +00001518 data_len, aead_req, async);
Vakul Garg94524d82018-08-29 15:26:55 +05301519 if (err == -EINPROGRESS)
1520 return err;
Vakul Garg0b243d02018-08-10 20:46:41 +05301521
1522 /* Release the pages in case iov was mapped to pages */
1523 for (; pages > 0; pages--)
1524 put_page(sg_page(&sgout[pages]));
1525
1526 kfree(mem);
1527 return err;
1528}
1529
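/* Decrypt the record held in @skb unless the device offload has already
 * done so, then strip the padding and record header, advance the RX
 * sequence number and mark the record decrypted.
 */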
Boris Pismennydafb67f2018-07-13 14:33:40 +03001530static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
Vakul Garg692d7b52019-01-16 10:40:16 +00001531 struct iov_iter *dest, int *chunk, bool *zc,
1532 bool async)
Boris Pismennydafb67f2018-07-13 14:33:40 +03001533{
1534 struct tls_context *tls_ctx = tls_get_ctx(sk);
1535 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001536 struct tls_prot_info *prot = &tls_ctx->prot_info;
Boris Pismennydafb67f2018-07-13 14:33:40 +03001537 struct strp_msg *rxm = strp_msg(skb);
Jakub Kicinskib53f4972019-05-09 16:14:07 -07001538 int pad, err = 0;
Boris Pismennydafb67f2018-07-13 14:33:40 +03001539
Boris Pismenny4799ac82018-07-13 14:33:43 +03001540 if (!ctx->decrypted) {
Jakub Kicinskib9d8fec2019-06-03 15:17:01 -07001541 if (tls_ctx->rx_conf == TLS_HW) {
Jakub Kicinski4de30a82019-10-06 21:09:30 -07001542 err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
Jakub Kicinskib9d8fec2019-06-03 15:17:01 -07001543 if (err < 0)
1544 return err;
1545 }
Jakub Kicinskibe2fbc12019-09-02 21:31:05 -07001546
Boris Pismennyd069b782019-02-27 17:38:06 +02001547 /* Still not decrypted after tls_device */
1548 if (!ctx->decrypted) {
1549 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
1550 async);
1551 if (err < 0) {
1552 if (err == -EINPROGRESS)
Jakub Kicinskifb0f8862019-06-03 15:17:05 -07001553 tls_advance_record_sn(sk, prot,
1554 &tls_ctx->rx);
Jakub Kicinski5c5d22a2020-01-10 04:36:55 -08001555 else if (err == -EBADMSG)
1556 TLS_INC_STATS(sock_net(sk),
1557 LINUX_MIB_TLSDECRYPTERROR);
Boris Pismennyd069b782019-02-27 17:38:06 +02001558 return err;
1559 }
Jakub Kicinskic43ac972019-03-28 14:54:43 -07001560 } else {
1561 *zc = false;
Vakul Garg94524d82018-08-29 15:26:55 +05301562 }
Dave Watson130b3922019-01-30 21:58:31 +00001563
Jakub Kicinskib53f4972019-05-09 16:14:07 -07001564 pad = padding_length(ctx, prot, skb);
1565 if (pad < 0)
1566 return pad;
1567
1568 rxm->full_len -= pad;
Vakul Garg4509de12019-02-14 07:11:35 +00001569 rxm->offset += prot->prepend_size;
1570 rxm->full_len -= prot->overhead_size;
Jakub Kicinskifb0f8862019-06-03 15:17:05 -07001571 tls_advance_record_sn(sk, prot, &tls_ctx->rx);
Jakub Kicinskibc76e5b2019-10-06 21:09:32 -07001572 ctx->decrypted = 1;
Dave Watsonfedf2012019-01-30 21:58:24 +00001573 ctx->saved_data_ready(sk);
Boris Pismenny4799ac82018-07-13 14:33:43 +03001574 } else {
1575 *zc = false;
1576 }
Boris Pismennydafb67f2018-07-13 14:33:40 +03001577
Boris Pismennydafb67f2018-07-13 14:33:40 +03001578 return err;
1579}
1580
1581int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1582 struct scatterlist *sgout)
Dave Watsonc46234e2018-03-22 10:10:35 -07001583{
Vakul Garg0b243d02018-08-10 20:46:41 +05301584 bool zc = true;
1585 int chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001586
Vakul Garg692d7b52019-01-16 10:40:16 +00001587 return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
Dave Watsonc46234e2018-03-22 10:10:35 -07001588}
1589
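/* Advance the current record by @len bytes. Returns false while unread
 * data remains in the record; once the record is fully consumed the skb
 * is released, recv_pkt is cleared and the stream parser is unpaused.
 */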
1590static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
1591 unsigned int len)
1592{
1593 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001594 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001595
Vakul Garg94524d82018-08-29 15:26:55 +05301596 if (skb) {
1597 struct strp_msg *rxm = strp_msg(skb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001598
Vakul Garg94524d82018-08-29 15:26:55 +05301599 if (len < rxm->full_len) {
1600 rxm->offset += len;
1601 rxm->full_len -= len;
1602 return false;
1603 }
Vakul Garga88c26f2019-03-21 11:59:57 +00001604 consume_skb(skb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001605 }
1606
1607 /* Finished with message */
1608 ctx->recv_pkt = NULL;
Doron Roberts-Kedes7170e602018-06-06 09:33:28 -07001609 __strp_unpause(&ctx->strp);
Dave Watsonc46234e2018-03-22 10:10:35 -07001610
1611 return true;
1612}
1613
Vakul Garg692d7b52019-01-16 10:40:16 +00001614/* This function traverses the rx_list in the tls receive context to copy
Vakul Garg2b794c42019-02-23 08:42:37 +00001615 * the decrypted records into the buffer provided by the caller when zero
Vakul Garg692d7b52019-01-16 10:40:16 +00001616 * copy is not true. Further, records are removed from the rx_list when
 1617 * this is not a peek case and the record has been consumed completely.
 1618 */
1619static int process_rx_list(struct tls_sw_context_rx *ctx,
1620 struct msghdr *msg,
Vakul Garg2b794c42019-02-23 08:42:37 +00001621 u8 *control,
1622 bool *cmsg,
Vakul Garg692d7b52019-01-16 10:40:16 +00001623 size_t skip,
1624 size_t len,
1625 bool zc,
1626 bool is_peek)
1627{
1628 struct sk_buff *skb = skb_peek(&ctx->rx_list);
Vakul Garg2b794c42019-02-23 08:42:37 +00001629 u8 ctrl = *control;
1630 u8 msgc = *cmsg;
1631 struct tls_msg *tlm;
Vakul Garg692d7b52019-01-16 10:40:16 +00001632 ssize_t copied = 0;
1633
Vakul Garg2b794c42019-02-23 08:42:37 +00001634 /* Set the record type in 'control' if caller didn't pass it */
1635 if (!ctrl && skb) {
1636 tlm = tls_msg(skb);
1637 ctrl = tlm->control;
1638 }
1639
Vakul Garg692d7b52019-01-16 10:40:16 +00001640 while (skip && skb) {
1641 struct strp_msg *rxm = strp_msg(skb);
Vakul Garg2b794c42019-02-23 08:42:37 +00001642 tlm = tls_msg(skb);
1643
1644 /* Cannot process a record of different type */
1645 if (ctrl != tlm->control)
1646 return 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001647
1648 if (skip < rxm->full_len)
1649 break;
1650
1651 skip = skip - rxm->full_len;
1652 skb = skb_peek_next(skb, &ctx->rx_list);
1653 }
1654
1655 while (len && skb) {
1656 struct sk_buff *next_skb;
1657 struct strp_msg *rxm = strp_msg(skb);
1658 int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1659
Vakul Garg2b794c42019-02-23 08:42:37 +00001660 tlm = tls_msg(skb);
1661
1662 /* Cannot process a record of different type */
1663 if (ctrl != tlm->control)
1664 return 0;
1665
1666 /* Set record type if not already done. For a non-data record,
1667 * do not proceed if record type could not be copied.
1668 */
1669 if (!msgc) {
1670 int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1671 sizeof(ctrl), &ctrl);
1672 msgc = true;
1673 if (ctrl != TLS_RECORD_TYPE_DATA) {
1674 if (cerr || msg->msg_flags & MSG_CTRUNC)
1675 return -EIO;
1676
1677 *cmsg = msgc;
1678 }
1679 }
1680
Vakul Garg692d7b52019-01-16 10:40:16 +00001681 if (!zc || (rxm->full_len - skip) > len) {
1682 int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1683 msg, chunk);
1684 if (err < 0)
1685 return err;
1686 }
1687
1688 len = len - chunk;
1689 copied = copied + chunk;
1690
1691 /* Consume the data from record if it is non-peek case*/
1692 if (!is_peek) {
1693 rxm->offset = rxm->offset + chunk;
1694 rxm->full_len = rxm->full_len - chunk;
1695
1696 /* Return if there is unconsumed data in the record */
1697 if (rxm->full_len - skip)
1698 break;
1699 }
1700
1701 /* The remaining skip-bytes must lie in 1st record in rx_list.
1702 * So from the 2nd record, 'skip' should be 0.
1703 */
1704 skip = 0;
1705
1706 if (msg)
1707 msg->msg_flags |= MSG_EOR;
1708
1709 next_skb = skb_peek_next(skb, &ctx->rx_list);
1710
1711 if (!is_peek) {
1712 skb_unlink(skb, &ctx->rx_list);
Vakul Garga88c26f2019-03-21 11:59:57 +00001713 consume_skb(skb);
Vakul Garg692d7b52019-01-16 10:40:16 +00001714 }
1715
1716 skb = next_skb;
1717 }
1718
Vakul Garg2b794c42019-02-23 08:42:37 +00001719 *control = ctrl;
Vakul Garg692d7b52019-01-16 10:40:16 +00001720 return copied;
1721}
1722
Dave Watsonc46234e2018-03-22 10:10:35 -07001723int tls_sw_recvmsg(struct sock *sk,
1724 struct msghdr *msg,
1725 size_t len,
1726 int nonblock,
1727 int flags,
1728 int *addr_len)
1729{
1730 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001731 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001732 struct tls_prot_info *prot = &tls_ctx->prot_info;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001733 struct sk_psock *psock;
Vakul Garg692d7b52019-01-16 10:40:16 +00001734 unsigned char control = 0;
1735 ssize_t decrypted = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001736 struct strp_msg *rxm;
Vakul Garg2b794c42019-02-23 08:42:37 +00001737 struct tls_msg *tlm;
Dave Watsonc46234e2018-03-22 10:10:35 -07001738 struct sk_buff *skb;
1739 ssize_t copied = 0;
1740 bool cmsg = false;
Daniel Borkmann06030db2018-06-15 03:07:46 +02001741 int target, err = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001742 long timeo;
David Howells00e23702018-10-22 13:07:28 +01001743 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
Vakul Garg692d7b52019-01-16 10:40:16 +00001744 bool is_peek = flags & MSG_PEEK;
Vakul Garg94524d82018-08-29 15:26:55 +05301745 int num_async = 0;
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05301746 int pending;
Dave Watsonc46234e2018-03-22 10:10:35 -07001747
1748 flags |= nonblock;
1749
1750 if (unlikely(flags & MSG_ERRQUEUE))
1751 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1752
John Fastabendd3b18ad32018-10-13 02:46:01 +02001753 psock = sk_psock_get(sk);
Dave Watsonc46234e2018-03-22 10:10:35 -07001754 lock_sock(sk);
1755
Vakul Garg692d7b52019-01-16 10:40:16 +00001756	/* Process pending already-decrypted records. These must be copied, i.e. non-zero-copy */
Vakul Garg2b794c42019-02-23 08:42:37 +00001757 err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
1758 is_peek);
Vakul Garg692d7b52019-01-16 10:40:16 +00001759 if (err < 0) {
1760 tls_err_abort(sk, err);
1761 goto end;
1762 } else {
1763 copied = err;
1764 }
1765
Jakub Kicinski46a16952019-05-24 10:34:30 -07001766 if (len <= copied)
Vakul Garg692d7b52019-01-16 10:40:16 +00001767 goto recv_end;
Jakub Kicinski46a16952019-05-24 10:34:30 -07001768
1769 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1770 len = len - copied;
1771 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
Vakul Garg692d7b52019-01-16 10:40:16 +00001772
Jakub Kicinski04b25a52019-05-24 10:34:32 -07001773 while (len && (decrypted + copied < target || ctx->recv_pkt)) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001774 bool retain_skb = false;
Vakul Garg692d7b52019-01-16 10:40:16 +00001775 bool zc = false;
1776 int to_decrypt;
Dave Watsonc46234e2018-03-22 10:10:35 -07001777 int chunk = 0;
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001778 bool async_capable;
1779 bool async = false;
Dave Watsonc46234e2018-03-22 10:10:35 -07001780
John Fastabendd3b18ad32018-10-13 02:46:01 +02001781 skb = tls_wait_data(sk, psock, flags, timeo, &err);
1782 if (!skb) {
1783 if (psock) {
John Fastabend02c558b2018-10-16 11:08:04 -07001784 int ret = __tcp_bpf_recvmsg(sk, psock,
1785 msg, len, flags);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001786
1787 if (ret > 0) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001788 decrypted += ret;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001789 len -= ret;
1790 continue;
1791 }
1792 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001793 goto recv_end;
Vakul Garg2b794c42019-02-23 08:42:37 +00001794 } else {
1795 tlm = tls_msg(skb);
1796 if (prot->version == TLS_1_3_VERSION)
1797 tlm->control = 0;
1798 else
1799 tlm->control = ctx->control;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001800 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001801
1802 rxm = strp_msg(skb);
Vakul Garg94524d82018-08-29 15:26:55 +05301803
Vakul Garg4509de12019-02-14 07:11:35 +00001804 to_decrypt = rxm->full_len - prot->overhead_size;
Dave Watsonfedf2012019-01-30 21:58:24 +00001805
1806 if (to_decrypt <= len && !is_kvec && !is_peek &&
Dave Watson130b3922019-01-30 21:58:31 +00001807 ctx->control == TLS_RECORD_TYPE_DATA &&
Vakul Garg4509de12019-02-14 07:11:35 +00001808 prot->version != TLS_1_3_VERSION)
Dave Watsonfedf2012019-01-30 21:58:24 +00001809 zc = true;
1810
Vakul Gargc0ab4732019-02-11 11:31:05 +00001811 /* Do not use async mode if record is non-data */
1812 if (ctx->control == TLS_RECORD_TYPE_DATA)
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001813 async_capable = ctx->async_capable;
Vakul Gargc0ab4732019-02-11 11:31:05 +00001814 else
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001815 async_capable = false;
Vakul Gargc0ab4732019-02-11 11:31:05 +00001816
Dave Watsonfedf2012019-01-30 21:58:24 +00001817 err = decrypt_skb_update(sk, skb, &msg->msg_iter,
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001818 &chunk, &zc, async_capable);
Dave Watsonfedf2012019-01-30 21:58:24 +00001819 if (err < 0 && err != -EINPROGRESS) {
1820 tls_err_abort(sk, EBADMSG);
1821 goto recv_end;
1822 }
1823
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001824 if (err == -EINPROGRESS) {
1825 async = true;
Dave Watsonfedf2012019-01-30 21:58:24 +00001826 num_async++;
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001827 } else if (prot->version == TLS_1_3_VERSION) {
Vakul Garg2b794c42019-02-23 08:42:37 +00001828 tlm->control = ctx->control;
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001829 }
Vakul Garg2b794c42019-02-23 08:42:37 +00001830
 1831		/* If the type of records being processed is not known yet,
 1832		 * set it to the record type just dequeued. If it is already
 1833		 * known but does not match the record type just dequeued, go
 1834		 * to the end. We always have the record type here since, for
 1835		 * TLS 1.2, it is known as soon as the record is dequeued from
 1836		 * the stream parser; for TLS 1.3, async mode is disabled.
 1837		 */
1838
1839 if (!control)
1840 control = tlm->control;
1841 else if (control != tlm->control)
1842 goto recv_end;
Dave Watsonfedf2012019-01-30 21:58:24 +00001843
Dave Watsonc46234e2018-03-22 10:10:35 -07001844 if (!cmsg) {
1845 int cerr;
1846
1847 cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
Vakul Garg2b794c42019-02-23 08:42:37 +00001848 sizeof(control), &control);
Dave Watsonc46234e2018-03-22 10:10:35 -07001849 cmsg = true;
Vakul Garg2b794c42019-02-23 08:42:37 +00001850 if (control != TLS_RECORD_TYPE_DATA) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001851 if (cerr || msg->msg_flags & MSG_CTRUNC) {
1852 err = -EIO;
1853 goto recv_end;
1854 }
1855 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001856 }
1857
Vakul Gargc0ab4732019-02-11 11:31:05 +00001858 if (async)
1859 goto pick_next_record;
1860
Dave Watsonfedf2012019-01-30 21:58:24 +00001861 if (!zc) {
1862 if (rxm->full_len > len) {
1863 retain_skb = true;
1864 chunk = len;
1865 } else {
1866 chunk = rxm->full_len;
1867 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001868
Dave Watsonfedf2012019-01-30 21:58:24 +00001869 err = skb_copy_datagram_msg(skb, rxm->offset,
1870 msg, chunk);
1871 if (err < 0)
1872 goto recv_end;
Dave Watsonc46234e2018-03-22 10:10:35 -07001873
Dave Watsonfedf2012019-01-30 21:58:24 +00001874 if (!is_peek) {
1875 rxm->offset = rxm->offset + chunk;
1876 rxm->full_len = rxm->full_len - chunk;
Vakul Garg692d7b52019-01-16 10:40:16 +00001877 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001878 }
1879
Vakul Garg94524d82018-08-29 15:26:55 +05301880pick_next_record:
Vakul Garg692d7b52019-01-16 10:40:16 +00001881 if (chunk > len)
1882 chunk = len;
1883
1884 decrypted += chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001885 len -= chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001886
Vakul Garg692d7b52019-01-16 10:40:16 +00001887 /* For async or peek case, queue the current skb */
1888 if (async || is_peek || retain_skb) {
1889 skb_queue_tail(&ctx->rx_list, skb);
1890 skb = NULL;
1891 }
Vakul Garg94524d82018-08-29 15:26:55 +05301892
Vakul Garg692d7b52019-01-16 10:40:16 +00001893 if (tls_sw_advance_skb(sk, skb, chunk)) {
1894 /* Return full control message to
1895 * userspace before trying to parse
1896 * another message type
Daniel Borkmann50c6b582018-09-14 23:00:55 +02001897 */
Vakul Garg692d7b52019-01-16 10:40:16 +00001898 msg->msg_flags |= MSG_EOR;
1899 if (ctx->control != TLS_RECORD_TYPE_DATA)
1900 goto recv_end;
1901 } else {
Daniel Borkmann50c6b582018-09-14 23:00:55 +02001902 break;
Dave Watsonc46234e2018-03-22 10:10:35 -07001903 }
Jakub Kicinski04b25a52019-05-24 10:34:32 -07001904 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001905
1906recv_end:
Vakul Garg94524d82018-08-29 15:26:55 +05301907 if (num_async) {
1908 /* Wait for all previously submitted records to be decrypted */
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05301909 spin_lock_bh(&ctx->decrypt_compl_lock);
1910 ctx->async_notify = true;
1911 pending = atomic_read(&ctx->decrypt_pending);
1912 spin_unlock_bh(&ctx->decrypt_compl_lock);
1913 if (pending) {
Vakul Garg94524d82018-08-29 15:26:55 +05301914 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1915 if (err) {
 1916				/* one of the async decrypt requests failed */
1917 tls_err_abort(sk, err);
1918 copied = 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001919 decrypted = 0;
1920 goto end;
Vakul Garg94524d82018-08-29 15:26:55 +05301921 }
1922 } else {
1923 reinit_completion(&ctx->async_wait.completion);
1924 }
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05301925
1926 /* There can be no concurrent accesses, since we have no
1927 * pending decrypt operations
1928 */
Vakul Garg94524d82018-08-29 15:26:55 +05301929 WRITE_ONCE(ctx->async_notify, false);
Vakul Garg692d7b52019-01-16 10:40:16 +00001930
1931 /* Drain records from the rx_list & copy if required */
1932 if (is_peek || is_kvec)
Vakul Garg2b794c42019-02-23 08:42:37 +00001933 err = process_rx_list(ctx, msg, &control, &cmsg, copied,
Vakul Garg692d7b52019-01-16 10:40:16 +00001934 decrypted, false, is_peek);
1935 else
Vakul Garg2b794c42019-02-23 08:42:37 +00001936 err = process_rx_list(ctx, msg, &control, &cmsg, 0,
Vakul Garg692d7b52019-01-16 10:40:16 +00001937 decrypted, true, is_peek);
1938 if (err < 0) {
1939 tls_err_abort(sk, err);
1940 copied = 0;
1941 goto end;
1942 }
Vakul Garg94524d82018-08-29 15:26:55 +05301943 }
1944
Vakul Garg692d7b52019-01-16 10:40:16 +00001945 copied += decrypted;
1946
1947end:
Dave Watsonc46234e2018-03-22 10:10:35 -07001948 release_sock(sk);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001949 if (psock)
1950 sk_psock_put(sk, psock);
Dave Watsonc46234e2018-03-22 10:10:35 -07001951 return copied ? : err;
1952}
1953
1954ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
1955 struct pipe_inode_info *pipe,
1956 size_t len, unsigned int flags)
1957{
1958 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001959 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001960 struct strp_msg *rxm = NULL;
1961 struct sock *sk = sock->sk;
1962 struct sk_buff *skb;
1963 ssize_t copied = 0;
1964 int err = 0;
1965 long timeo;
1966 int chunk;
Vakul Garg0b243d02018-08-10 20:46:41 +05301967 bool zc = false;
Dave Watsonc46234e2018-03-22 10:10:35 -07001968
1969 lock_sock(sk);
1970
1971 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1972
John Fastabendd3b18ad32018-10-13 02:46:01 +02001973 skb = tls_wait_data(sk, NULL, flags, timeo, &err);
Dave Watsonc46234e2018-03-22 10:10:35 -07001974 if (!skb)
1975 goto splice_read_end;
1976
Dave Watsonc46234e2018-03-22 10:10:35 -07001977 if (!ctx->decrypted) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001978		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
Dave Watsonc46234e2018-03-22 10:10:35 -07001979
 1986		/* Check the decryption result before trusting ctx->control */
 1987		if (err < 0) {
 1988			tls_err_abort(sk, EBADMSG);
 1989			goto splice_read_end;
 1990		}
 1985
Dave Watsonfedf2012019-01-30 21:58:24 +00001980		/* splice does not support reading control messages */
 1981		if (ctx->control != TLS_RECORD_TYPE_DATA) {
Valentin Vidic4a5cdc62019-12-05 07:41:18 +01001982			err = -EINVAL;
Dave Watsonfedf2012019-01-30 21:58:24 +00001983			goto splice_read_end;
 1984		}
Jakub Kicinskibc76e5b2019-10-06 21:09:32 -07001990		ctx->decrypted = 1;
Dave Watsonc46234e2018-03-22 10:10:35 -07001991 }
1992 rxm = strp_msg(skb);
1993
1994 chunk = min_t(unsigned int, rxm->full_len, len);
1995 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
1996 if (copied < 0)
1997 goto splice_read_end;
1998
1999 if (likely(!(flags & MSG_PEEK)))
2000 tls_sw_advance_skb(sk, skb, copied);
2001
2002splice_read_end:
2003 release_sock(sk);
2004 return copied ? : err;
2005}
2006
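/* Report whether any received data is ready: a parsed record awaiting
 * processing, previously decrypted records on rx_list, or psock ingress
 * messages.
 */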
John Fastabend924ad652018-10-13 02:46:00 +02002007bool tls_sw_stream_read(const struct sock *sk)
Dave Watsonc46234e2018-03-22 10:10:35 -07002008{
Dave Watsonc46234e2018-03-22 10:10:35 -07002009 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002010 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002011 bool ingress_empty = true;
2012 struct sk_psock *psock;
Dave Watsonc46234e2018-03-22 10:10:35 -07002013
John Fastabendd3b18ad32018-10-13 02:46:01 +02002014 rcu_read_lock();
2015 psock = sk_psock(sk);
2016 if (psock)
2017 ingress_empty = list_empty(&psock->ingress_msg);
2018 rcu_read_unlock();
Dave Watsonc46234e2018-03-22 10:10:35 -07002019
Jakub Kicinski13aecb12019-07-04 14:50:36 -07002020 return !ingress_empty || ctx->recv_pkt ||
2021 !skb_queue_empty(&ctx->rx_list);
Dave Watsonc46234e2018-03-22 10:10:35 -07002022}
2023
2024static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
2025{
2026 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002027 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00002028 struct tls_prot_info *prot = &tls_ctx->prot_info;
Kees Cook3463e512018-06-25 16:55:05 -07002029 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
Dave Watsonc46234e2018-03-22 10:10:35 -07002030 struct strp_msg *rxm = strp_msg(skb);
2031 size_t cipher_overhead;
2032 size_t data_len = 0;
2033 int ret;
2034
2035 /* Verify that we have a full TLS header, or wait for more data */
Vakul Garg4509de12019-02-14 07:11:35 +00002036 if (rxm->offset + prot->prepend_size > skb->len)
Dave Watsonc46234e2018-03-22 10:10:35 -07002037 return 0;
2038
Kees Cook3463e512018-06-25 16:55:05 -07002039 /* Sanity-check size of on-stack buffer. */
Vakul Garg4509de12019-02-14 07:11:35 +00002040 if (WARN_ON(prot->prepend_size > sizeof(header))) {
Kees Cook3463e512018-06-25 16:55:05 -07002041 ret = -EINVAL;
2042 goto read_failure;
2043 }
2044
Dave Watsonc46234e2018-03-22 10:10:35 -07002045 /* Linearize header to local buffer */
Vakul Garg4509de12019-02-14 07:11:35 +00002046 ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
Dave Watsonc46234e2018-03-22 10:10:35 -07002047
2048 if (ret < 0)
2049 goto read_failure;
2050
2051 ctx->control = header[0];
2052
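	/* Bytes 3 and 4 of the TLS record header carry the payload length
	 * in network (big-endian) byte order.
	 */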
2053 data_len = ((header[4] & 0xFF) | (header[3] << 8));
2054
Vakul Garg4509de12019-02-14 07:11:35 +00002055 cipher_overhead = prot->tag_size;
2056 if (prot->version != TLS_1_3_VERSION)
2057 cipher_overhead += prot->iv_size;
Dave Watsonc46234e2018-03-22 10:10:35 -07002058
Dave Watson130b3922019-01-30 21:58:31 +00002059 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
Vakul Garg4509de12019-02-14 07:11:35 +00002060 prot->tail_size) {
Dave Watsonc46234e2018-03-22 10:10:35 -07002061 ret = -EMSGSIZE;
2062 goto read_failure;
2063 }
2064 if (data_len < cipher_overhead) {
2065 ret = -EBADMSG;
2066 goto read_failure;
2067 }
2068
Dave Watson130b3922019-01-30 21:58:31 +00002069 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2070 if (header[1] != TLS_1_2_VERSION_MINOR ||
2071 header[2] != TLS_1_2_VERSION_MAJOR) {
Dave Watsonc46234e2018-03-22 10:10:35 -07002072 ret = -EINVAL;
2073 goto read_failure;
2074 }
Jakub Kicinskibe2fbc12019-09-02 21:31:05 -07002075
Jakub Kicinskif953d33b2019-06-10 21:40:02 -07002076 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
Jakub Kicinskife58a5a2019-06-10 21:40:01 -07002077 TCP_SKB_CB(skb)->seq + rxm->offset);
Dave Watsonc46234e2018-03-22 10:10:35 -07002078 return data_len + TLS_HEADER_SIZE;
2079
2080read_failure:
2081 tls_err_abort(strp->sk, ret);
2082
2083 return ret;
2084}
2085
2086static void tls_queue(struct strparser *strp, struct sk_buff *skb)
2087{
2088 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002089 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07002090
Jakub Kicinskibc76e5b2019-10-06 21:09:32 -07002091 ctx->decrypted = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07002092
2093 ctx->recv_pkt = skb;
2094 strp_pause(strp);
2095
Vakul Gargad13acc2018-07-30 16:08:33 +05302096 ctx->saved_data_ready(strp->sk);
Dave Watsonc46234e2018-03-22 10:10:35 -07002097}
2098
2099static void tls_data_ready(struct sock *sk)
2100{
2101 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002102 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002103 struct sk_psock *psock;
Dave Watsonc46234e2018-03-22 10:10:35 -07002104
2105 strp_data_ready(&ctx->strp);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002106
2107 psock = sk_psock_get(sk);
Xiyu Yang62b40112020-04-25 21:10:23 +08002108 if (psock) {
2109 if (!list_empty(&psock->ingress_msg))
2110 ctx->saved_data_ready(sk);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002111 sk_psock_put(sk, psock);
2112 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002113}
2114
John Fastabendf87e62d2019-07-19 10:29:16 -07002115void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2116{
2117 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2118
2119 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2120 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2121 cancel_delayed_work_sync(&ctx->tx_work.work);
2122}
2123
John Fastabend313ab002019-07-19 10:29:17 -07002124void tls_sw_release_resources_tx(struct sock *sk)
Dave Watson3c4d7552017-06-14 11:37:39 -07002125{
2126 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002127 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +05302128 struct tls_rec *rec, *tmp;
2129
2130 /* Wait for any pending async encryptions to complete */
2131 smp_store_mb(ctx->async_notify, true);
2132 if (atomic_read(&ctx->encrypt_pending))
2133 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2134
Vakul Garga42055e2018-09-21 09:46:13 +05302135 tls_tx_records(sk, -1);
2136
Vakul Garg9932a292018-09-24 15:35:56 +05302137	/* Free up unsent records in tx_list. First, free the partially
Vakul Garga42055e2018-09-21 09:46:13 +05302138	 * sent record, if any, at the head of tx_list.
 2139	 */
Jakub Kicinskic5daa6c2019-11-27 12:16:44 -08002140 if (tls_ctx->partially_sent_record) {
2141 tls_free_partial_record(sk, tls_ctx);
Vakul Garg9932a292018-09-24 15:35:56 +05302142 rec = list_first_entry(&ctx->tx_list,
Vakul Garga42055e2018-09-21 09:46:13 +05302143 struct tls_rec, list);
2144 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02002145 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +05302146 kfree(rec);
2147 }
2148
Vakul Garg9932a292018-09-24 15:35:56 +05302149 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
Vakul Garga42055e2018-09-21 09:46:13 +05302150 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02002151 sk_msg_free(sk, &rec->msg_encrypted);
2152 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +05302153 kfree(rec);
2154 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002155
Vakul Garg201876b2018-07-24 16:54:27 +05302156 crypto_free_aead(ctx->aead_send);
Vakul Gargc7749732018-09-25 20:21:51 +05302157 tls_free_open_rec(sk);
John Fastabend313ab002019-07-19 10:29:17 -07002158}
2159
2160void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2161{
2162 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002163
2164 kfree(ctx);
2165}
2166
Boris Pismenny39f56e12018-07-13 14:33:41 +03002167void tls_sw_release_resources_rx(struct sock *sk)
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002168{
2169 struct tls_context *tls_ctx = tls_get_ctx(sk);
2170 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2171
Jakub Kicinski12c76862019-04-19 16:52:19 -07002172 kfree(tls_ctx->rx.rec_seq);
2173 kfree(tls_ctx->rx.iv);
2174
Dave Watsonc46234e2018-03-22 10:10:35 -07002175 if (ctx->aead_recv) {
Vakul Garg201876b2018-07-24 16:54:27 +05302176 kfree_skb(ctx->recv_pkt);
2177 ctx->recv_pkt = NULL;
Vakul Garg692d7b52019-01-16 10:40:16 +00002178 skb_queue_purge(&ctx->rx_list);
Dave Watsonc46234e2018-03-22 10:10:35 -07002179 crypto_free_aead(ctx->aead_recv);
2180 strp_stop(&ctx->strp);
John Fastabend313ab002019-07-19 10:29:17 -07002181 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2182 * we still want to strp_stop(), but sk->sk_data_ready was
2183 * never swapped.
2184 */
2185 if (ctx->saved_data_ready) {
2186 write_lock_bh(&sk->sk_callback_lock);
2187 sk->sk_data_ready = ctx->saved_data_ready;
2188 write_unlock_bh(&sk->sk_callback_lock);
2189 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002190 }
Boris Pismenny39f56e12018-07-13 14:33:41 +03002191}
2192
John Fastabend313ab002019-07-19 10:29:17 -07002193void tls_sw_strparser_done(struct tls_context *tls_ctx)
2194{
2195 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2196
2197 strp_done(&ctx->strp);
2198}
2199
2200void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2201{
2202 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2203
2204 kfree(ctx);
2205}
2206
Boris Pismenny39f56e12018-07-13 14:33:41 +03002207void tls_sw_free_resources_rx(struct sock *sk)
2208{
2209 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismenny39f56e12018-07-13 14:33:41 +03002210
2211 tls_sw_release_resources_rx(sk);
John Fastabend313ab002019-07-19 10:29:17 -07002212 tls_sw_free_ctx_rx(tls_ctx);
Dave Watson3c4d7552017-06-14 11:37:39 -07002213}
2214
Vakul Garg9932a292018-09-24 15:35:56 +05302215/* The work handler to transmit the encrypted records in tx_list */
Vakul Garga42055e2018-09-21 09:46:13 +05302216static void tx_work_handler(struct work_struct *work)
2217{
2218 struct delayed_work *delayed_work = to_delayed_work(work);
2219 struct tx_work *tx_work = container_of(delayed_work,
2220 struct tx_work, work);
2221 struct sock *sk = tx_work->sk;
2222 struct tls_context *tls_ctx = tls_get_ctx(sk);
John Fastabendf87e62d2019-07-19 10:29:16 -07002223 struct tls_sw_context_tx *ctx;
2224
2225 if (unlikely(!tls_ctx))
2226 return;
2227
2228 ctx = tls_sw_ctx_tx(tls_ctx);
2229 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2230 return;
Vakul Garga42055e2018-09-21 09:46:13 +05302231
2232 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2233 return;
Jakub Kicinski79ffe602019-11-05 14:24:35 -08002234 mutex_lock(&tls_ctx->tx_lock);
Vakul Garga42055e2018-09-21 09:46:13 +05302235 lock_sock(sk);
2236 tls_tx_records(sk, -1);
2237 release_sock(sk);
Jakub Kicinski79ffe602019-11-05 14:24:35 -08002238 mutex_unlock(&tls_ctx->tx_lock);
Vakul Garga42055e2018-09-21 09:46:13 +05302239}
2240
Boris Pismenny7463d3a2019-02-27 17:38:04 +02002241void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2242{
2243 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2244
2245 /* Schedule the transmission if tx list is ready */
Jakub Kicinski02b1fa02019-11-05 14:24:34 -08002246 if (is_tx_ready(tx_ctx) &&
2247 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2248 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
Boris Pismenny7463d3a2019-02-27 17:38:04 +02002249}
2250
Jakub Kicinski318892a2019-07-19 10:29:14 -07002251void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2252{
2253 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2254
2255 write_lock_bh(&sk->sk_callback_lock);
2256 rx_ctx->saved_data_ready = sk->sk_data_ready;
2257 sk->sk_data_ready = tls_data_ready;
2258 write_unlock_bh(&sk->sk_callback_lock);
2259
2260 strp_check_rcv(&rx_ctx->strp);
2261}
2262
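/* tls_set_sw_offload() is reached via setsockopt(SOL_TLS, TLS_TX/TLS_RX)
 * after the "tls" ULP has been attached. A minimal userspace sketch
 * (error handling omitted; assumes a connected TCP socket and key
 * material taken from a completed handshake):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	// fill ci.key, ci.iv, ci.salt and ci.rec_seq here
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 */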
Dave Watsonc46234e2018-03-22 10:10:35 -07002263int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
Dave Watson3c4d7552017-06-14 11:37:39 -07002264{
Vakul Garg4509de12019-02-14 07:11:35 +00002265 struct tls_context *tls_ctx = tls_get_ctx(sk);
2266 struct tls_prot_info *prot = &tls_ctx->prot_info;
Dave Watson3c4d7552017-06-14 11:37:39 -07002267 struct tls_crypto_info *crypto_info;
2268 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002269 struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002270 struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002271 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2272 struct tls_sw_context_rx *sw_ctx_rx = NULL;
Dave Watsonc46234e2018-03-22 10:10:35 -07002273 struct cipher_context *cctx;
2274 struct crypto_aead **aead;
2275 struct strp_callbacks cb;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002276 u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
Vakul Garg692d7b52019-01-16 10:40:16 +00002277 struct crypto_tfm *tfm;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002278 char *iv, *rec_seq, *key, *salt, *cipher_name;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002279 size_t keysize;
Dave Watson3c4d7552017-06-14 11:37:39 -07002280 int rc = 0;
2281
2282 if (!ctx) {
2283 rc = -EINVAL;
2284 goto out;
2285 }
2286
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002287 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03002288 if (!ctx->priv_ctx_tx) {
2289 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2290 if (!sw_ctx_tx) {
2291 rc = -ENOMEM;
2292 goto out;
2293 }
2294 ctx->priv_ctx_tx = sw_ctx_tx;
2295 } else {
2296 sw_ctx_tx =
2297 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
Dave Watsonc46234e2018-03-22 10:10:35 -07002298 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002299 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03002300 if (!ctx->priv_ctx_rx) {
2301 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2302 if (!sw_ctx_rx) {
2303 rc = -ENOMEM;
2304 goto out;
2305 }
2306 ctx->priv_ctx_rx = sw_ctx_rx;
2307 } else {
2308 sw_ctx_rx =
2309 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002310 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002311 }
2312
Dave Watsonc46234e2018-03-22 10:10:35 -07002313 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03002314 crypto_init_wait(&sw_ctx_tx->async_wait);
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05302315 spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02002316 crypto_info = &ctx->crypto_send.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07002317 cctx = &ctx->tx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002318 aead = &sw_ctx_tx->aead_send;
Vakul Garg9932a292018-09-24 15:35:56 +05302319 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
Vakul Garga42055e2018-09-21 09:46:13 +05302320 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2321 sw_ctx_tx->tx_work.sk = sk;
Dave Watsonc46234e2018-03-22 10:10:35 -07002322 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03002323 crypto_init_wait(&sw_ctx_rx->async_wait);
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05302324 spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02002325 crypto_info = &ctx->crypto_recv.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07002326 cctx = &ctx->rx;
Vakul Garg692d7b52019-01-16 10:40:16 +00002327 skb_queue_head_init(&sw_ctx_rx->rx_list);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002328 aead = &sw_ctx_rx->aead_recv;
Dave Watsonc46234e2018-03-22 10:10:35 -07002329 }
2330
Dave Watson3c4d7552017-06-14 11:37:39 -07002331 switch (crypto_info->cipher_type) {
2332 case TLS_CIPHER_AES_GCM_128: {
2333 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2334 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2335 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2336 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
2337 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2338 rec_seq =
2339 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
2340 gcm_128_info =
2341 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002342 keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2343 key = gcm_128_info->key;
2344 salt = gcm_128_info->salt;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002345 salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2346 cipher_name = "gcm(aes)";
Dave Watsonfb99bce2019-01-30 21:58:05 +00002347 break;
2348 }
2349 case TLS_CIPHER_AES_GCM_256: {
2350 nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2351 tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2352 iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2353 iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
2354 rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2355 rec_seq =
2356 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
2357 gcm_256_info =
2358 (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
2359 keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2360 key = gcm_256_info->key;
2361 salt = gcm_256_info->salt;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002362 salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2363 cipher_name = "gcm(aes)";
2364 break;
2365 }
2366 case TLS_CIPHER_AES_CCM_128: {
2367 nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2368 tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2369 iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2370 iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
2371 rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2372 rec_seq =
2373 ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
2374 ccm_128_info =
2375 (struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
2376 keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2377 key = ccm_128_info->key;
2378 salt = ccm_128_info->salt;
2379 salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2380 cipher_name = "ccm(aes)";
Dave Watson3c4d7552017-06-14 11:37:39 -07002381 break;
2382 }
2383 default:
2384 rc = -EINVAL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002385 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07002386 }
2387
Jakub Kicinski89fec472019-06-10 21:40:00 -07002388 /* Sanity-check the sizes for stack allocations. */
2389 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2390 rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
Kees Cookb16520f2018-04-10 17:52:34 -07002391 rc = -EINVAL;
2392 goto free_priv;
2393 }
2394
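	/* TLS 1.3 transmits no explicit nonce and authenticates only the
	 * 5-byte record header (TLS_HEADER_SIZE); the single tail byte
	 * accounts for the inner content type appended to the plaintext.
	 */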
Dave Watson130b3922019-01-30 21:58:31 +00002395 if (crypto_info->version == TLS_1_3_VERSION) {
2396 nonce_size = 0;
Vakul Garg4509de12019-02-14 07:11:35 +00002397 prot->aad_size = TLS_HEADER_SIZE;
2398 prot->tail_size = 1;
Dave Watson130b3922019-01-30 21:58:31 +00002399 } else {
Vakul Garg4509de12019-02-14 07:11:35 +00002400 prot->aad_size = TLS_AAD_SPACE_SIZE;
2401 prot->tail_size = 0;
Dave Watson130b3922019-01-30 21:58:31 +00002402 }
2403
Vakul Garg4509de12019-02-14 07:11:35 +00002404 prot->version = crypto_info->version;
2405 prot->cipher_type = crypto_info->cipher_type;
2406 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2407 prot->tag_size = tag_size;
2408 prot->overhead_size = prot->prepend_size +
2409 prot->tag_size + prot->tail_size;
2410 prot->iv_size = iv_size;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002411 prot->salt_size = salt_size;
2412 cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
Dave Watsonc46234e2018-03-22 10:10:35 -07002413 if (!cctx->iv) {
Dave Watson3c4d7552017-06-14 11:37:39 -07002414 rc = -ENOMEM;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002415 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07002416 }
Dave Watsonfb99bce2019-01-30 21:58:05 +00002417 /* Note: 128 & 256 bit salt are the same size */
Vakul Garg4509de12019-02-14 07:11:35 +00002418 prot->rec_seq_size = rec_seq_size;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002419 memcpy(cctx->iv, salt, salt_size);
2420 memcpy(cctx->iv + salt_size, iv, iv_size);
zhong jiang969d5092018-08-01 00:50:24 +08002421 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
Dave Watsonc46234e2018-03-22 10:10:35 -07002422 if (!cctx->rec_seq) {
Dave Watson3c4d7552017-06-14 11:37:39 -07002423 rc = -ENOMEM;
2424 goto free_iv;
2425 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002426
Dave Watsonc46234e2018-03-22 10:10:35 -07002427 if (!*aead) {
Vakul Gargf295b3a2019-03-20 02:03:36 +00002428 *aead = crypto_alloc_aead(cipher_name, 0, 0);
Dave Watsonc46234e2018-03-22 10:10:35 -07002429 if (IS_ERR(*aead)) {
2430 rc = PTR_ERR(*aead);
2431 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002432 goto free_rec_seq;
2433 }
2434 }
2435
2436 ctx->push_pending_record = tls_sw_push_pending_record;
2437
Dave Watsonfb99bce2019-01-30 21:58:05 +00002438 rc = crypto_aead_setkey(*aead, key, keysize);
2439
Dave Watson3c4d7552017-06-14 11:37:39 -07002440 if (rc)
2441 goto free_aead;
2442
Vakul Garg4509de12019-02-14 07:11:35 +00002443 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
Dave Watsonc46234e2018-03-22 10:10:35 -07002444 if (rc)
2445 goto free_aead;
2446
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002447 if (sw_ctx_rx) {
Vakul Garg692d7b52019-01-16 10:40:16 +00002448 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
Vakul Garg8497ded2019-02-09 07:53:28 +00002449
2450 if (crypto_info->version == TLS_1_3_VERSION)
Jakub Kicinski5c5458e2019-10-06 21:09:31 -07002451 sw_ctx_rx->async_capable = 0;
Vakul Garg8497ded2019-02-09 07:53:28 +00002452 else
2453 sw_ctx_rx->async_capable =
Jakub Kicinski5c5458e2019-10-06 21:09:31 -07002454 !!(tfm->__crt_alg->cra_flags &
2455 CRYPTO_ALG_ASYNC);
Vakul Garg692d7b52019-01-16 10:40:16 +00002456
Dave Watsonc46234e2018-03-22 10:10:35 -07002457 /* Set up strparser */
2458 memset(&cb, 0, sizeof(cb));
2459 cb.rcv_msg = tls_queue;
2460 cb.parse_msg = tls_read_size;
2461
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002462 strp_init(&sw_ctx_rx->strp, sk, &cb);
Dave Watsonc46234e2018-03-22 10:10:35 -07002463 }
2464
2465 goto out;
Dave Watson3c4d7552017-06-14 11:37:39 -07002466
2467free_aead:
Dave Watsonc46234e2018-03-22 10:10:35 -07002468 crypto_free_aead(*aead);
2469 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002470free_rec_seq:
Dave Watsonc46234e2018-03-22 10:10:35 -07002471 kfree(cctx->rec_seq);
2472 cctx->rec_seq = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002473free_iv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002474 kfree(cctx->iv);
2475 cctx->iv = NULL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002476free_priv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002477 if (tx) {
2478 kfree(ctx->priv_ctx_tx);
2479 ctx->priv_ctx_tx = NULL;
2480 } else {
2481 kfree(ctx->priv_ctx_rx);
2482 ctx->priv_ctx_rx = NULL;
2483 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002484out:
2485 return rc;
2486}