/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

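/* Count how many scatterlist entries are needed to map an skb's head
 * data, page frags and frag list, recursing with a bounded depth so a
 * deeply nested frag list cannot overflow the kernel stack. A sketch
 * of the typical call, mirroring decrypt_internal() later in this file:
 *
 *	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
 *			 rxm->full_len - prot->prepend_size);
 */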
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}

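/* In TLS 1.3 the real content type is hidden inside the ciphertext:
 * the plaintext ends with one non-zero content-type byte followed by
 * zero or more zero padding bytes. padding_length() scans backwards
 * over the decrypted record for that byte, records it in ctx->control
 * and returns how many padding bytes must be stripped. The initial
 * offset of 17 skips the 16-byte authentication tag that still trails
 * the plaintext at this point (the AES-GCM/CCM ciphers handled here
 * all use a 16-byte tag) plus the content-type byte itself.
 */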
static int padding_length(struct tls_sw_context_rx *ctx,
			  struct tls_prot_info *prot, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		char content_type = 0;
		int err;
		int back = 17;

		while (content_type == 0) {
			if (back > rxm->full_len - prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb,
					    rxm->offset + rxm->full_len - back,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			back++;
		}
		ctx->control = content_type;
	}
	return sub;
}

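/* Completion callback for asynchronous AEAD decryption. Runs in
 * crypto-driver context: it propagates any error to the socket,
 * otherwise strips the zero padding, TLS header and record overhead
 * from the strparser message, drops the page references of a
 * zero-copy destination, and completes ctx->async_wait once the last
 * pending decrypt has finished and a waiter has registered itself.
 */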
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct scatterlist *sgout = aead_req->dst;
	struct scatterlist *sgin = aead_req->src;
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	prot = &tls_ctx->prot_info;

	/* Propagate the error, if there was one */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(skb->sk),
				      LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(skb->sk, err);
	} else {
		struct strp_msg *rxm = strp_msg(skb);
		int pad;

		pad = padding_length(ctx, prot, skb);
		if (pad < 0) {
			ctx->async_wait.err = pad;
			tls_err_abort(skb->sk, pad);
		} else {
			rxm->full_len -= pad;
			rxm->offset += prot->prepend_size;
			rxm->full_len -= prot->overhead_size;
		}
	}

	/* After using skb->sk to propagate sk through the crypto async
	 * callback we need to NULL it again.
	 */
	skb->sk = NULL;

	/* Free the destination pages if the skb was not decrypted in place */
	if (sgout != sgin) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	pending = atomic_dec_return(&ctx->decrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);
}

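/* Submit a single AEAD decrypt request. data_len is the ciphertext
 * length without the authentication tag; the tag is added to the
 * crypto length here. In async mode a pending request returns
 * -EINPROGRESS and completion is handled by tls_decrypt_done();
 * otherwise we wait synchronously for the crypto layer.
 */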
static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (async) {
		/* Using skb->sk to push sk through to crypto async callback
		 * handler. This allows propagating errors up to the socket
		 * if needed. It _must_ be cleared in the async handler
		 * before consume_skb is called. We _know_ skb->sk is NULL
		 * because it is a clone from strparser.
		 */
		skb->sk = sk;
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, skb);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (async)
			return ret;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	} else if (ret == -EBADMSG) {
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
	}

	if (async)
		atomic_dec(&ctx->decrypt_pending);

	return ret;
}

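/* Trim both halves of the open record: the plaintext side down to
 * target_size bytes and the encrypted side to target_size plus the
 * per-record overhead (header, tag, etc.). Used to undo a partially
 * staged record, e.g. on a failed copy from user space.
 */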
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from the encrypted sg
	 * at the end of the plaintext sg. It is guaranteed that msg_en
	 * has enough room (ensured by the caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use the
	 * same offset for both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

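/* Allocate a new record. The tls_rec is allocated with enough
 * trailing space for the AEAD request context of the send transform,
 * and both sg_aead_in/sg_aead_out are primed with rec->aad_space as
 * their first entry; the second entry is chained to the actual
 * plaintext or ciphertext later, in tls_push_record().
 */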
static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}

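/* Transmit encrypted records to TCP in submission order. Any
 * partially sent record is finished first; after that, records are
 * popped off tx_list for as long as their tx_ready flag shows that
 * encryption has completed. flags == -1 means "use the flags each
 * record was queued with".
 */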
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	return rc;
}

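/* Completion callback for asynchronous AEAD encryption. It restores
 * the scatterlist entry that tls_do_encryption() shifted past the TLS
 * header, marks the record ready for transmission, and schedules
 * tx_work if the record sits at the head of tx_list, since records
 * must go out to TCP in the order they were queued.
 */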
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct sock *sk = req->data;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);
	msg_en = &rec->msg_encrypted;

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}

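/* Encrypt the open record. rec->iv_data is assembled from the
 * session salt+IV (with the fixed B0 byte in front for AES-CCM) and,
 * for TLS 1.3, XORed with the record sequence number by
 * xor_iv_with_seq(). The destination sg entry is shifted past the
 * record header for the duration of the AEAD operation. On success
 * or -EINPROGRESS the record is already on tx_list, detached from
 * ctx->open_rec, and the sequence number has been advanced.
 */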
static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, sk);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from the context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}

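/* Split the open record in two at split_point, typically because a
 * BPF verdict applies only to part of the buffered plaintext or the
 * encrypted buffer cannot hold the full record plus overhead. Bytes
 * before the split stay in 'from'; the remainder moves to a freshly
 * allocated record returned through 'to'. A page straddling the
 * split is referenced from both halves. tls_merge_open_record()
 * undoes the split.
 */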
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}

static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}

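/* Close the open record and submit it for encryption: split it if
 * apply_bytes or buffer-size limits require, append the TLS 1.3
 * inner content type, chain the AAD and payload scatterlists, fill
 * in the AAD and the record header, then call tls_do_encryption().
 * On -EINPROGRESS the record is sent later by tls_encrypt_done() /
 * tls_tx_records(); otherwise transmission is attempted right away.
 */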
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, uninitialized_var(orig_end);
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message. No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, prot->rec_seq_size,
		     record_type, prot->version);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type, prot->version);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}

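/* Apply the sk_msg BPF verdict program, if a psock is attached, to
 * the plaintext before it is encrypted: __SK_PASS encrypts and sends
 * as usual, __SK_REDIRECT hands the plaintext to another socket, and
 * __SK_DROP frees it and returns -EACCES. Without a psock, or with
 * MSG_SENDPAGE_NOPOLICY set, this reduces to tls_push_record().
 */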
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       size_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
		}
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		if (delta < msg->sg.size)
			delta -= msg->sg.size;
		else
			delta = 0;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err < 0) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}

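/* sendmsg() for a TLS socket. User data is staged into the open
 * record: zero-copied from user pages when the crypto is synchronous
 * and the record is being closed (full record or EOR), copied
 * otherwise, with a matching encrypted-side buffer allocated or
 * cloned. Records are pushed through bpf_exec_tx_verdict() whenever
 * they fill up or the message ends; with async crypto, outstanding
 * encryptions are awaited before the byte count is returned.
 */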
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		smp_store_mb(ctx->async_notify, true);

		if (atomic_read(&ctx->encrypt_pending))
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return copied ? copied : ret;
}

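/* Common worker for sendpage(): append page fragments to the open
 * record's plaintext and close the record under the same full-record
 * or EOR conditions as tls_sw_sendmsg(). Callers hold the socket
 * lock (and, except for the _locked variant below, tx_lock).
 */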
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
			      int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct sk_msg *msg_pl;
	struct tls_rec *rec;
	int num_async = 0;
	size_t copied = 0;
	bool full_record;
	int record_room;
	int ret = 0;
	bool eor;

	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		msg_pl = &rec->msg_plaintext;

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + copy + prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}

		sk_msg_page_add(msg_pl, page, copy, offset);
		sk_mem_charge(sk, copy);

		offset += copy;
		size -= copy;
		copied += copy;

		tls_ctx->pending_open_record_frags = true;
		if (full_record || eor || sk_msg_full(msg_pl)) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied, flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto sendpage_end;
				}
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}

		if (ctx->open_rec)
			goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	ret = sk_stream_error(sk, flags, ret);
	return copied ? copied : ret;
}

int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags)
{
	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
		      MSG_NO_SHARED_FRAGS))
		return -EOPNOTSUPP;

	return tls_sw_do_sendpage(sk, page, offset, size, flags);
}

int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}

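/* Wait until the strparser has queued a complete record or the psock
 * ingress queue is non-empty, honouring MSG_DONTWAIT, socket errors,
 * shutdown and pending signals. Returns the queued skb, or NULL
 * (setting *err for the error cases).
 */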
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
				     int flags, long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo,
			      ctx->recv_pkt != skb ||
			      !sk_psock_queue_empty(psock),
			      &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

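/* Pin user pages backing 'from' and map up to 'length' bytes into
 * the scatterlist 'to', advancing *pages_used and *size_used. This
 * builds the zero-copy destination for decryption. On failure the
 * iterator is reverted so the caller can fall back to the in-place
 * (non zero-copy) path.
 */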
static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
			       int length, int *pages_used,
			       unsigned int *size_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = *size_used;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

/* This function decrypts the input skb into either out_iov or out_sg,
 * or in place inside the skb buffers themselves. The input parameter
 * 'zc' indicates whether zero-copy mode should be tried. With zero-copy
 * mode, either out_iov or out_sg must be non-NULL. If both out_iov and
 * out_sg are NULL, the decryption happens inside the skb buffers, i.e.
 * zero-copy gets disabled and 'zc' is updated.
 */
static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *out_iov,
			    struct scatterlist *out_sg,
			    int *chunk, bool *zc, bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = strp_msg(skb);
	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
	struct aead_request *aead_req;
	struct sk_buff *unused;
	u8 *aad, *iv, *mem = NULL;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size +
			     prot->tail_size;
	int iv_offset = 0;

	if (*zc && (out_iov || out_sg)) {
		if (out_iov)
			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
		else
			n_sgout = sg_nents(out_sg);
		n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
				 rxm->full_len - prot->prepend_size);
	} else {
		n_sgout = 0;
		*zc = false;
		n_sgin = skb_cow_data(skb, 0, &unused);
	}

	if (n_sgin < 1)
		return -EBADMSG;

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	nsg = n_sgin + n_sgout;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
	mem_size = mem_size + prot->aad_size;
	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

	/* Allocate a single block of memory which contains
	 * aead_req || sgin[] || sgout[] || aad || iv.
	 * This order achieves correct alignment for aead_req, sgin, sgout.
	 */
	mem = kmalloc(mem_size, sk->sk_allocation);
	if (!mem)
		return -ENOMEM;

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	sgin = (struct scatterlist *)(mem + aead_size);
	sgout = sgin + n_sgin;
	aad = (u8 *)(sgout + n_sgout);
	iv = aad + prot->aad_size;

	/* For CCM based ciphers, first byte of nonce+iv is always '2' */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		iv[0] = 2;
		iv_offset = 1;
	}

	/* Prepare IV */
	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + iv_offset + prot->salt_size,
			    prot->iv_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}
	if (prot->version == TLS_1_3_VERSION)
		memcpy(iv + iv_offset, tls_ctx->rx.iv,
		       crypto_aead_ivsize(ctx->aead_recv));
	else
		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);

	xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, prot->rec_seq_size,
		     ctx->control, prot->version);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}

	if (n_sgout) {
		if (out_iov) {
			sg_init_table(sgout, n_sgout);
			sg_set_buf(&sgout[0], aad, prot->aad_size);

			*chunk = 0;
			err = tls_setup_from_iter(sk, out_iov, data_len,
						  &pages, chunk, &sgout[1],
						  (n_sgout - 1));
			if (err < 0)
				goto fallback_to_reg_recv;
		} else if (out_sg) {
			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
		} else {
			goto fallback_to_reg_recv;
		}
	} else {
fallback_to_reg_recv:
		sgout = sgin;
		pages = 0;
		*chunk = data_len;
		*zc = false;
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
				data_len, aead_req, async);
	if (err == -EINPROGRESS)
		return err;

	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));

	kfree(mem);
	return err;
}
1518
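/* Decrypt the current record if it has not been decrypted yet (by the
 * device offload or a previous call), strip padding and the TLS header,
 * and advance the expected record sequence number.
 */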
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct iov_iter *dest, int *chunk, bool *zc,
			      bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = strp_msg(skb);
	int pad, err = 0;

	if (!ctx->decrypted) {
		if (tls_ctx->rx_conf == TLS_HW) {
			err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
			if (err < 0)
				return err;
		}

		/* Still not decrypted after tls_device */
		if (!ctx->decrypted) {
			err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
					       async);
			if (err < 0) {
				if (err == -EINPROGRESS)
					tls_advance_record_sn(sk, prot,
							      &tls_ctx->rx);

				return err;
			}
		} else {
			*zc = false;
		}

		pad = padding_length(ctx, prot, skb);
		if (pad < 0)
			return pad;

		rxm->full_len -= pad;
		rxm->offset += prot->prepend_size;
		rxm->full_len -= prot->overhead_size;
		tls_advance_record_sn(sk, prot, &tls_ctx->rx);
		ctx->decrypted = 1;
		ctx->saved_data_ready(sk);
	} else {
		*zc = false;
	}

	return err;
}
1567
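/* Synchronously decrypt the record in 'skb' into the caller-provided
 * scatterlist 'sgout'.
 */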
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	bool zc = true;
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
}
1576
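/* Consume 'len' bytes of the current record. Returns true if the whole
 * record was consumed and the strparser was unpaused, false if data is
 * still left in the record.
 */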
static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (skb) {
		struct strp_msg *rxm = strp_msg(skb);

		if (len < rxm->full_len) {
			rxm->offset += len;
			rxm->full_len -= len;
			return false;
		}
		consume_skb(skb);
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	__strp_unpause(&ctx->strp);

	return true;
}
1600
/* This function traverses the rx_list in the TLS receive context and
 * copies the decrypted records into the buffer provided by the caller
 * when zero copy is not true. Further, a record is removed from rx_list
 * if it is not a peek case and the record has been consumed completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   bool *cmsg,
			   size_t skip,
			   size_t len,
			   bool zc,
			   bool is_peek)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	u8 ctrl = *control;
	u8 msgc = *cmsg;
	struct tls_msg *tlm;
	ssize_t copied = 0;

	/* Set the record type in 'control' if caller didn't pass it */
	if (!ctrl && skb) {
		tlm = tls_msg(skb);
		ctrl = tlm->control;
	}

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		/* Cannot process a record of different type */
		if (ctrl != tlm->control)
			return 0;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		/* Cannot process a record of different type */
		if (ctrl != tlm->control)
			return 0;

		/* Set record type if not already done. For a non-data record,
		 * do not proceed if record type could not be copied.
		 */
		if (!msgc) {
			int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					    sizeof(ctrl), &ctrl);
			msgc = true;
			if (ctrl != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC)
					return -EIO;

				*cmsg = msgc;
			}
		}

		if (!zc || (rxm->full_len - skip) > len) {
			int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
							msg, chunk);
			if (err < 0)
				return err;
		}

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from the record if it is a non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}

	*control = ctrl;
	return copied;
}
1709
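/* recvmsg() for TLS_SW: first drain already-decrypted records from
 * rx_list, then decrypt records as they arrive, using zero-copy into
 * the user iovec when the record type, TLS version and buffer size
 * allow it, and falling back to an in-place decrypt plus copy otherwise.
 */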
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct sk_psock *psock;
	unsigned char control = 0;
	ssize_t decrypted = 0;
	struct strp_msg *rxm;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	int num_async = 0;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	psock = sk_psock_get(sk);
	lock_sock(sk);

	/* Process pending decrypted records. It must be non-zero-copy */
	err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
			      is_peek);
	if (err < 0) {
		tls_err_abort(sk, err);
		goto end;
	} else {
		copied = err;
	}

	if (len <= copied)
		goto recv_end;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	while (len && (decrypted + copied < target || ctx->recv_pkt)) {
		bool retain_skb = false;
		bool zc = false;
		int to_decrypt;
		int chunk = 0;
		bool async_capable;
		bool async = false;

		skb = tls_wait_data(sk, psock, flags, timeo, &err);
		if (!skb) {
			if (psock) {
				int ret = __tcp_bpf_recvmsg(sk, psock,
							    msg, len, flags);

				if (ret > 0) {
					decrypted += ret;
					len -= ret;
					continue;
				}
			}
			goto recv_end;
		} else {
			tlm = tls_msg(skb);
			if (prot->version == TLS_1_3_VERSION)
				tlm->control = 0;
			else
				tlm->control = ctx->control;
		}

		rxm = strp_msg(skb);

		to_decrypt = rxm->full_len - prot->overhead_size;

		if (to_decrypt <= len && !is_kvec && !is_peek &&
		    ctx->control == TLS_RECORD_TYPE_DATA &&
		    prot->version != TLS_1_3_VERSION)
			zc = true;

		/* Do not use async mode if record is non-data */
		if (ctx->control == TLS_RECORD_TYPE_DATA)
			async_capable = ctx->async_capable;
		else
			async_capable = false;

		err = decrypt_skb_update(sk, skb, &msg->msg_iter,
					 &chunk, &zc, async_capable);
		if (err < 0 && err != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			goto recv_end;
		}

		if (err == -EINPROGRESS) {
			async = true;
			num_async++;
		} else if (prot->version == TLS_1_3_VERSION) {
			tlm->control = ctx->control;
		}

		/* If the type of records being processed is not known yet,
		 * set it to record type just dequeued. If it is already known,
		 * but does not match the record type just dequeued, go to end.
		 * We always get record type here since for tls1.2, record type
		 * is known just after record is dequeued from stream parser.
		 * For tls1.3, we disable async.
		 */

		if (!control)
			control = tlm->control;
		else if (control != tlm->control)
			goto recv_end;

		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(control), &control);
			cmsg = true;
			if (control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		}

		if (async)
			goto pick_next_record;

		if (!zc) {
			if (rxm->full_len > len) {
				retain_skb = true;
				chunk = len;
			} else {
				chunk = rxm->full_len;
			}

			err = skb_copy_datagram_msg(skb, rxm->offset,
						    msg, chunk);
			if (err < 0)
				goto recv_end;

			if (!is_peek) {
				rxm->offset = rxm->offset + chunk;
				rxm->full_len = rxm->full_len - chunk;
			}
		}

pick_next_record:
		if (chunk > len)
			chunk = len;

		decrypted += chunk;
		len -= chunk;

		/* For async or peek case, queue the current skb */
		if (async || is_peek || retain_skb) {
			skb_queue_tail(&ctx->rx_list, skb);
			skb = NULL;
		}

		if (tls_sw_advance_skb(sk, skb, chunk)) {
			/* Return full control message to
			 * userspace before trying to parse
			 * another message type
			 */
			msg->msg_flags |= MSG_EOR;
			if (ctx->control != TLS_RECORD_TYPE_DATA)
				goto recv_end;
		} else {
			break;
		}
	}

recv_end:
	if (num_async) {
		/* Wait for all previously submitted records to be decrypted */
		smp_store_mb(ctx->async_notify, true);
		if (atomic_read(&ctx->decrypt_pending)) {
			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
			if (err) {
				/* one of async decrypt failed */
				tls_err_abort(sk, err);
				copied = 0;
				decrypted = 0;
				goto end;
			}
		} else {
			reinit_completion(&ctx->async_wait.completion);
		}
		WRITE_ONCE(ctx->async_notify, false);

		/* Drain records from the rx_list & copy if required */
		if (is_peek || is_kvec)
			err = process_rx_list(ctx, msg, &control, &cmsg, copied,
					      decrypted, false, is_peek);
		else
			err = process_rx_list(ctx, msg, &control, &cmsg, 0,
					      decrypted, true, is_peek);
		if (err < 0) {
			tls_err_abort(sk, err);
			copied = 0;
			goto end;
		}
	}

	copied += decrypted;

end:
	release_sock(sk);
	if (psock)
		sk_psock_put(sk, psock);
	return copied ? : err;
}
1932
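/* splice_read() for TLS_SW: decrypt the current record in place and
 * splice its plaintext into the pipe. Records other than application
 * data cannot be spliced and fail with -EINVAL.
 */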
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc = false;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, NULL, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);

		/* splice does not support reading control messages */
		if (ctx->control != TLS_RECORD_TYPE_DATA) {
			err = -EINVAL;
			goto splice_read_end;
		}

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = 1;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}
1985
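/* Report whether data is ready to be read: a parsed TLS record, queued
 * decrypted records on rx_list, or pending psock ingress messages.
 */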
bool tls_sw_stream_read(const struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();

	return !ingress_empty || ctx->recv_pkt ||
	       !skb_queue_empty(&ctx->rx_list);
}
2002
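/* strparser parse_msg callback: validate the TLS record header and
 * return the full record length, 0 if more data is needed, or a
 * negative error for a malformed header.
 */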
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + prot->prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);

	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = prot->tag_size;
	if (prot->version != TLS_1_3_VERSION)
		cipher_overhead += prot->iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}

	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + rxm->offset);
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}
2064
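/* strparser rcv_msg callback: stash the freshly parsed record and pause
 * the parser until the record has been consumed.
 */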
static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = 0;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	ctx->saved_data_ready(strp->sk);
}
2077
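/* Replacement for sk->sk_data_ready installed by tls_sw_strparser_arm():
 * feed new TCP data to the strparser, and also run the original callback
 * when psock ingress messages are pending.
 */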
static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;

	strp_data_ready(&ctx->strp);

	psock = sk_psock_get(sk);
	if (psock && !list_empty(&psock->ingress_msg)) {
		ctx->saved_data_ready(sk);
		sk_psock_put(sk, psock);
	}
}
2092
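/* Stop the TX worker and prevent it from being rescheduled. */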
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
	cancel_delayed_work_sync(&ctx->tx_work.work);
}
2101
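/* Flush pending async encryptions, transmit what can still be sent, and
 * free all queued TX records and the send AEAD.
 */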
void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;

	/* Wait for any pending async encryptions to complete */
	smp_store_mb(ctx->async_notify, true);
	if (atomic_read(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	tls_tx_records(sk, -1);

	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		tls_free_partial_record(sk, tls_ctx);
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}
2137
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	kfree(ctx);
}
2144
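/* Free the RX crypto material, drop any parsed but unread records, and
 * restore the original sk_data_ready callback.
 */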
void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}
2170
void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_done(&ctx->strp);
}
2177
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(ctx);
}
2184
void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(tls_ctx);
}
2192
/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;

	if (unlikely(!tls_ctx))
		return;

	ctx = tls_sw_ctx_tx(tls_ctx);
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;
	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	tls_tx_records(sk, -1);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}
2218
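/* Kick the TX worker when previously blocked records may now be sent. */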
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* Schedule the transmission if tx list is ready */
	if (is_tx_ready(tx_ctx) &&
	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}
2228
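/* Swap in the TLS data_ready callback and start the strparser on any
 * data already queued on the socket.
 */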
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	write_lock_bh(&sk->sk_callback_lock);
	rx_ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = tls_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);

	strp_check_rcv(&rx_ctx->strp);
}
2240
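/* Set up the software crypto state for one direction (tx != 0 selects
 * the transmit path): allocate the per-direction context, derive the
 * IV, record sequence and key layout from the user-supplied
 * crypto_info, and configure the AEAD transform and (for rx) the
 * strparser.
 */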
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
	struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
	struct crypto_tfm *tfm;
	char *iv, *rec_seq, *key, *salt, *cipher_name;
	size_t keysize;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
		sw_ctx_tx->tx_work.sk = sk;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		skb_queue_head_init(&sw_ctx_rx->rx_list);
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		key = gcm_128_info->key;
		salt = gcm_128_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
		gcm_256_info =
			(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		key = gcm_256_info->key;
		salt = gcm_256_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_CCM_128: {
		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
		ccm_128_info =
			(struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
		key = ccm_128_info->key;
		salt = ccm_128_info->salt;
		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
		cipher_name = "ccm(aes)";
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size +
			      prot->tag_size + prot->tail_size;
	prot->iv_size = iv_size;
	prot->salt_size = salt_size;
	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	/* Note: 128 & 256 bit salt are the same size */
	prot->rec_seq_size = rec_seq_size;
	memcpy(cctx->iv, salt, salt_size);
	memcpy(cctx->iv + salt_size, iv, iv_size);
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, key, keysize);

	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		if (crypto_info->version == TLS_1_3_VERSION)
			sw_ctx_rx->async_capable = 0;
		else
			sw_ctx_rx->async_capable =
				!!(tfm->__crt_alg->cra_flags &
				   CRYPTO_ALG_ASYNC);

		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}