/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

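/* Walk an skb and count the number of scatterlist entries needed to map
 * @len bytes starting at @offset, covering the linear area, the page
 * frags and any frag list. Recursion into frag lists is capped at 24
 * levels, after which -EMSGSIZE is returned.
 */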
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}

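/* Work out how much TLS 1.3 zero padding sits at the tail of the record
 * in @skb by scanning backwards for the first non-zero byte, which is
 * the real content type and is saved in ctx->control. Returns the
 * number of padding bytes, or a negative error if no content type byte
 * can be found within the record.
 */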
static int padding_length(struct tls_sw_context_rx *ctx,
			  struct tls_prot_info *prot, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		char content_type = 0;
		int err;
		int back = 17;

		while (content_type == 0) {
			if (back > rxm->full_len - prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb,
					    rxm->offset + rxm->full_len - back,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			back++;
		}
		ctx->control = content_type;
	}
	return sub;
}

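/* Completion callback for async decrypt requests. On success it strips
 * the TLS 1.3 padding and the record overhead from the strparser
 * message; on failure it records the error on the socket. Either way it
 * releases any out-of-place destination pages, frees the request and
 * wakes up a waiter once the last pending decryption completes.
 */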
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct scatterlist *sgout = aead_req->dst;
	struct scatterlist *sgin = aead_req->src;
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	prot = &tls_ctx->prot_info;

	/* Propagate any error to the socket */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(skb->sk),
				      LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(skb->sk, err);
	} else {
		struct strp_msg *rxm = strp_msg(skb);
		int pad;

		pad = padding_length(ctx, prot, skb);
		if (pad < 0) {
			ctx->async_wait.err = pad;
			tls_err_abort(skb->sk, pad);
		} else {
			rxm->full_len -= pad;
			rxm->offset += prot->prepend_size;
			rxm->full_len -= prot->overhead_size;
		}
	}

	/* After using skb->sk to propagate sk through crypto async callback
	 * we need to NULL it again.
	 */
	skb->sk = NULL;

	/* Free the destination pages if skb was not decrypted in place */
	if (sgout != sgin) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	pending = atomic_dec_return(&ctx->decrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);
}

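/* Build and submit a single AEAD decrypt request for @skb. In async
 * mode the request completes in tls_decrypt_done() and -EINPROGRESS is
 * returned; otherwise the crypto operation is waited for synchronously.
 */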
static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (async) {
		/* Using skb->sk to push sk through to crypto async callback
		 * handler. This allows propagating errors up to the socket
		 * if needed. It _must_ be cleared in the async handler
		 * before consume_skb is called. We _know_ skb->sk is NULL
		 * because it is a clone from strparser.
		 */
		skb->sk = sk;
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, skb);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (async)
			return ret;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}

	if (async)
		atomic_dec(&ctx->decrypt_pending);

	return ret;
}

static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has enough required room (ensured by caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

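/* Allocate and initialize a new TX record. Entry 0 of both AEAD
 * scatterlists points at the record's AAD space; entry 1 is left open
 * so the plaintext/ciphertext scatterlists can be chained in later.
 */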
static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}

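/* Transmit encrypted records to the TCP layer: first finish any
 * partially sent record, then push every record at the head of tx_list
 * whose encryption has completed (tx_ready), stopping at the first one
 * that is still pending. A fatal error aborts the socket.
 */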
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	return rc;
}

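/* Completion callback for async encrypt requests. Re-extends the first
 * ciphertext entry to cover the record header again, records any error
 * on the socket, marks the record ready for transmission and schedules
 * the TX worker if the record sits at the head of tx_list.
 */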
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct sock *sk = req->data;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);
	msg_en = &rec->msg_encrypted;

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}

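/* Prepare the per-record nonce and submit one AEAD encrypt request for
 * the open record. The record is queued on tx_list before the request
 * is issued; on failure it is unlinked again. On success or
 * -EINPROGRESS the record is detached from the context and the TX
 * sequence number advanced.
 */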
static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, sk);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}

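/* Split the open record @from in two so that the plaintext a BPF
 * verdict applies to (apply_bytes) can be pushed on its own. The
 * remaining plaintext is moved to a newly allocated record returned in
 * *to, and *orig_end saves the original end index so that
 * tls_merge_open_record() can undo the split.
 */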
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}

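/* Undo tls_split_open_record(): move the plaintext of @from back into
 * @to, coalescing the boundary entries when they are contiguous within
 * one page, take over the encrypted buffer and free @from.
 */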
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}

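/* Encrypt the currently open record and hand it to tls_tx_records().
 * If only part of the plaintext should be sent (BPF apply_bytes) or the
 * encrypted buffer is too small, the record is split first and the
 * remainder reinstalled as the new open record. For TLS 1.3 the content
 * type byte is chained onto the end of the plaintext before encryption.
 */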
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, uninitialized_var(orig_end);
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message.  No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, prot->rec_seq_size,
		     record_type, prot->version);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type, prot->version);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}

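/* Run the psock TX verdict program, if any, over the plaintext and act
 * on the result: __SK_PASS encrypts and sends the record on this
 * socket, __SK_REDIRECT hands the plaintext to another socket, and
 * __SK_DROP frees it and fails with -EACCES. Without a psock (or with
 * MSG_SENDPAGE_NOPOLICY) the record is simply pushed.
 */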
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       size_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}

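/* sendmsg() for a SW TX TLS socket: copy (or, when possible, zero-copy
 * map) user data into the open record, push records as they fill up or
 * on EOR, and finally flush any records whose async encryption has
 * completed in the meantime.
 */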
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		smp_store_mb(ctx->async_notify, true);

		if (atomic_read(&ctx->encrypt_pending))
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return copied ? copied : ret;
}

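/* Common worker for the sendpage() entry points: append @page to the
 * open record's plaintext scatterlist without copying the data and push
 * full records. Called with the socket lock held by its callers.
 */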
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
			      int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct sk_msg *msg_pl;
	struct tls_rec *rec;
	int num_async = 0;
	size_t copied = 0;
	bool full_record;
	int record_room;
	int ret = 0;
	bool eor;

	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		msg_pl = &rec->msg_plaintext;

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + copy + prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}

		sk_msg_page_add(msg_pl, page, copy, offset);
		sk_mem_charge(sk, copy);

		offset += copy;
		size -= copy;
		copied += copy;

		tls_ctx->pending_open_record_frags = true;
		if (full_record || eor || sk_msg_full(msg_pl)) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied, flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto sendpage_end;
				}
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}

		if (ctx->open_rec)
			goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	ret = sk_stream_error(sk, flags, ret);
	return copied ? copied : ret;
}

int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags)
{
	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
		      MSG_NO_SHARED_FRAGS))
		return -EOPNOTSUPP;

	return tls_sw_do_sendpage(sk, page, offset, size, flags);
}

int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}

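/* Block until the strparser has queued a complete record (ctx->recv_pkt)
 * or the psock ingress queue is non-empty, honoring MSG_DONTWAIT,
 * socket errors, shutdown and pending signals.
 */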
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
				     int flags, long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo,
			      ctx->recv_pkt != skb ||
			      !sk_psock_queue_empty(psock),
			      &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

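/* Pin the user pages backing @from and map up to @length bytes of them
 * into the scatterlist @to, advancing the iterator as it goes. On
 * failure the iterator is rewound by the amount consumed; *pages_used
 * and *size_used are updated either way.
 */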
static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
			       int length, int *pages_used,
			       unsigned int *size_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = *size_used;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

Vakul Garg0b243d02018-08-10 20:46:41 +05301371/* This function decrypts the input skb into either out_iov or out_sg,
 1372 * or in place inside the skb's own buffers. The input parameter 'zc'
 1373 * indicates whether zero-copy mode should be tried. With zero-copy mode,
 1374 * either out_iov or out_sg must be non-NULL. If both out_iov and out_sg
 1375 * are NULL, the decryption happens inside the skb buffers themselves,
 1376 * i.e. zero-copy gets disabled and 'zc' is updated.
 1377 */
1378
1379static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1380 struct iov_iter *out_iov,
1381 struct scatterlist *out_sg,
Vakul Garg692d7b52019-01-16 10:40:16 +00001382 int *chunk, bool *zc, bool async)
Vakul Garg0b243d02018-08-10 20:46:41 +05301383{
1384 struct tls_context *tls_ctx = tls_get_ctx(sk);
1385 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001386 struct tls_prot_info *prot = &tls_ctx->prot_info;
Vakul Garg0b243d02018-08-10 20:46:41 +05301387 struct strp_msg *rxm = strp_msg(skb);
1388 int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
1389 struct aead_request *aead_req;
1390 struct sk_buff *unused;
1391 u8 *aad, *iv, *mem = NULL;
1392 struct scatterlist *sgin = NULL;
1393 struct scatterlist *sgout = NULL;
Vakul Garg4509de12019-02-14 07:11:35 +00001394 const int data_len = rxm->full_len - prot->overhead_size +
1395 prot->tail_size;
Vakul Gargf295b3a2019-03-20 02:03:36 +00001396 int iv_offset = 0;
Vakul Garg0b243d02018-08-10 20:46:41 +05301397
1398 if (*zc && (out_iov || out_sg)) {
1399 if (out_iov)
1400 n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
1401 else
1402 n_sgout = sg_nents(out_sg);
Vakul Garg4509de12019-02-14 07:11:35 +00001403 n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1404 rxm->full_len - prot->prepend_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301405 } else {
1406 n_sgout = 0;
1407 *zc = false;
Doron Roberts-Kedes0927f712018-08-28 16:33:57 -07001408 n_sgin = skb_cow_data(skb, 0, &unused);
Vakul Garg0b243d02018-08-10 20:46:41 +05301409 }
1410
Vakul Garg0b243d02018-08-10 20:46:41 +05301411 if (n_sgin < 1)
1412 return -EBADMSG;
1413
1414 /* Increment to accommodate AAD */
1415 n_sgin = n_sgin + 1;
1416
1417 nsg = n_sgin + n_sgout;
1418
1419 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1420 mem_size = aead_size + (nsg * sizeof(struct scatterlist));
Vakul Garg4509de12019-02-14 07:11:35 +00001421 mem_size = mem_size + prot->aad_size;
Vakul Garg0b243d02018-08-10 20:46:41 +05301422 mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
1423
1424 /* Allocate a single block of memory which contains
1425 * aead_req || sgin[] || sgout[] || aad || iv.
1426 * This order achieves correct alignment for aead_req, sgin, sgout.
1427 */
1428 mem = kmalloc(mem_size, sk->sk_allocation);
1429 if (!mem)
1430 return -ENOMEM;
1431
1432 /* Segment the allocated memory */
1433 aead_req = (struct aead_request *)mem;
1434 sgin = (struct scatterlist *)(mem + aead_size);
1435 sgout = sgin + n_sgin;
1436 aad = (u8 *)(sgout + n_sgout);
Vakul Garg4509de12019-02-14 07:11:35 +00001437 iv = aad + prot->aad_size;
Vakul Garg0b243d02018-08-10 20:46:41 +05301438
Vakul Gargf295b3a2019-03-20 02:03:36 +00001439	/* For CCM based ciphers, iv[0] carries the CCM length-field byte L' = L - 1; a 12-byte nonce gives L = 3, so it is always '2' */
1440 if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
1441 iv[0] = 2;
1442 iv_offset = 1;
1443 }
1444
Vakul Garg0b243d02018-08-10 20:46:41 +05301445 /* Prepare IV */
1446 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
Vakul Gargf295b3a2019-03-20 02:03:36 +00001447 iv + iv_offset + prot->salt_size,
Vakul Garg4509de12019-02-14 07:11:35 +00001448 prot->iv_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301449 if (err < 0) {
1450 kfree(mem);
1451 return err;
1452 }
Vakul Garg4509de12019-02-14 07:11:35 +00001453 if (prot->version == TLS_1_3_VERSION)
Vakul Gargf295b3a2019-03-20 02:03:36 +00001454 memcpy(iv + iv_offset, tls_ctx->rx.iv,
1455 crypto_aead_ivsize(ctx->aead_recv));
Dave Watson130b3922019-01-30 21:58:31 +00001456 else
Vakul Gargf295b3a2019-03-20 02:03:36 +00001457 memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
Dave Watson130b3922019-01-30 21:58:31 +00001458
Vakul Garg4509de12019-02-14 07:11:35 +00001459 xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);
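	/* The AEAD nonce is now complete: for TLS 1.2 it is the salt
	 * followed by the explicit IV copied out of the record above,
	 * while for TLS 1.3 it is the static IV XORed with the record
	 * sequence number inside xor_iv_with_seq().
	 */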
Vakul Garg0b243d02018-08-10 20:46:41 +05301460
1461 /* Prepare AAD */
Vakul Garg4509de12019-02-14 07:11:35 +00001462 tls_make_aad(aad, rxm->full_len - prot->overhead_size +
1463 prot->tail_size,
1464 tls_ctx->rx.rec_seq, prot->rec_seq_size,
1465 ctx->control, prot->version);
Vakul Garg0b243d02018-08-10 20:46:41 +05301466
1467 /* Prepare sgin */
1468 sg_init_table(sgin, n_sgin);
Vakul Garg4509de12019-02-14 07:11:35 +00001469 sg_set_buf(&sgin[0], aad, prot->aad_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301470 err = skb_to_sgvec(skb, &sgin[1],
Vakul Garg4509de12019-02-14 07:11:35 +00001471 rxm->offset + prot->prepend_size,
1472 rxm->full_len - prot->prepend_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301473 if (err < 0) {
1474 kfree(mem);
1475 return err;
1476 }
1477
1478 if (n_sgout) {
1479 if (out_iov) {
1480 sg_init_table(sgout, n_sgout);
Vakul Garg4509de12019-02-14 07:11:35 +00001481 sg_set_buf(&sgout[0], aad, prot->aad_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301482
1483 *chunk = 0;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001484 err = tls_setup_from_iter(sk, out_iov, data_len,
1485 &pages, chunk, &sgout[1],
1486 (n_sgout - 1));
Vakul Garg0b243d02018-08-10 20:46:41 +05301487 if (err < 0)
1488 goto fallback_to_reg_recv;
1489 } else if (out_sg) {
1490 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1491 } else {
1492 goto fallback_to_reg_recv;
1493 }
1494 } else {
1495fallback_to_reg_recv:
1496 sgout = sgin;
1497 pages = 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001498 *chunk = data_len;
Vakul Garg0b243d02018-08-10 20:46:41 +05301499 *zc = false;
1500 }
1501
1502 /* Prepare and submit AEAD request */
Vakul Garg94524d82018-08-29 15:26:55 +05301503 err = tls_do_decryption(sk, skb, sgin, sgout, iv,
Vakul Garg692d7b52019-01-16 10:40:16 +00001504 data_len, aead_req, async);
Vakul Garg94524d82018-08-29 15:26:55 +05301505 if (err == -EINPROGRESS)
1506 return err;
Vakul Garg0b243d02018-08-10 20:46:41 +05301507
1508 /* Release the pages in case iov was mapped to pages */
1509 for (; pages > 0; pages--)
1510 put_page(sg_page(&sgout[pages]));
1511
1512 kfree(mem);
1513 return err;
1514}
1515
Boris Pismennydafb67f2018-07-13 14:33:40 +03001516static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
Vakul Garg692d7b52019-01-16 10:40:16 +00001517 struct iov_iter *dest, int *chunk, bool *zc,
1518 bool async)
Boris Pismennydafb67f2018-07-13 14:33:40 +03001519{
1520 struct tls_context *tls_ctx = tls_get_ctx(sk);
1521 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001522 struct tls_prot_info *prot = &tls_ctx->prot_info;
Boris Pismennydafb67f2018-07-13 14:33:40 +03001523 struct strp_msg *rxm = strp_msg(skb);
Jakub Kicinskib53f4972019-05-09 16:14:07 -07001524 int pad, err = 0;
Boris Pismennydafb67f2018-07-13 14:33:40 +03001525
Boris Pismenny4799ac82018-07-13 14:33:43 +03001526 if (!ctx->decrypted) {
Jakub Kicinskib9d8fec2019-06-03 15:17:01 -07001527 if (tls_ctx->rx_conf == TLS_HW) {
Jakub Kicinski4de30a82019-10-06 21:09:30 -07001528 err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
Jakub Kicinskib9d8fec2019-06-03 15:17:01 -07001529 if (err < 0)
1530 return err;
1531 }
Jakub Kicinskibe2fbc12019-09-02 21:31:05 -07001532
Boris Pismennyd069b782019-02-27 17:38:06 +02001533 /* Still not decrypted after tls_device */
1534 if (!ctx->decrypted) {
1535 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
1536 async);
1537 if (err < 0) {
1538 if (err == -EINPROGRESS)
Jakub Kicinskifb0f8862019-06-03 15:17:05 -07001539 tls_advance_record_sn(sk, prot,
1540 &tls_ctx->rx);
Jakub Kicinski5c5d22a2020-01-10 04:36:55 -08001541 else if (err == -EBADMSG)
1542 TLS_INC_STATS(sock_net(sk),
1543 LINUX_MIB_TLSDECRYPTERROR);
Boris Pismennyd069b782019-02-27 17:38:06 +02001544 return err;
1545 }
Jakub Kicinskic43ac972019-03-28 14:54:43 -07001546 } else {
1547 *zc = false;
Vakul Garg94524d82018-08-29 15:26:55 +05301548 }
Dave Watson130b3922019-01-30 21:58:31 +00001549
Jakub Kicinskib53f4972019-05-09 16:14:07 -07001550 pad = padding_length(ctx, prot, skb);
1551 if (pad < 0)
1552 return pad;
1553
1554 rxm->full_len -= pad;
Vakul Garg4509de12019-02-14 07:11:35 +00001555 rxm->offset += prot->prepend_size;
1556 rxm->full_len -= prot->overhead_size;
Jakub Kicinskifb0f8862019-06-03 15:17:05 -07001557 tls_advance_record_sn(sk, prot, &tls_ctx->rx);
Jakub Kicinskibc76e5b2019-10-06 21:09:32 -07001558 ctx->decrypted = 1;
Dave Watsonfedf2012019-01-30 21:58:24 +00001559 ctx->saved_data_ready(sk);
Boris Pismenny4799ac82018-07-13 14:33:43 +03001560 } else {
1561 *zc = false;
1562 }
Boris Pismennydafb67f2018-07-13 14:33:40 +03001563
Boris Pismennydafb67f2018-07-13 14:33:40 +03001564 return err;
1565}
1566
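/* decrypt_skb() below is a synchronous convenience wrapper, also used by the
 * device-offload re-encryption fallback: it decrypts one record into the
 * caller-supplied scatterlist, with zero-copy requested but async completion
 * disabled.
 */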
1567int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1568 struct scatterlist *sgout)
Dave Watsonc46234e2018-03-22 10:10:35 -07001569{
Vakul Garg0b243d02018-08-10 20:46:41 +05301570 bool zc = true;
1571 int chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001572
Vakul Garg692d7b52019-01-16 10:40:16 +00001573 return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
Dave Watsonc46234e2018-03-22 10:10:35 -07001574}
1575
1576static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
1577 unsigned int len)
1578{
1579 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001580 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001581
Vakul Garg94524d82018-08-29 15:26:55 +05301582 if (skb) {
1583 struct strp_msg *rxm = strp_msg(skb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001584
Vakul Garg94524d82018-08-29 15:26:55 +05301585 if (len < rxm->full_len) {
1586 rxm->offset += len;
1587 rxm->full_len -= len;
1588 return false;
1589 }
Vakul Garga88c26f2019-03-21 11:59:57 +00001590 consume_skb(skb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001591 }
1592
1593 /* Finished with message */
1594 ctx->recv_pkt = NULL;
Doron Roberts-Kedes7170e602018-06-06 09:33:28 -07001595 __strp_unpause(&ctx->strp);
Dave Watsonc46234e2018-03-22 10:10:35 -07001596
1597 return true;
1598}
1599
Vakul Garg692d7b52019-01-16 10:40:16 +00001600/* This function traverses the rx_list in the tls receive context and
Vakul Garg2b794c42019-02-23 08:42:37 +00001601 * copies decrypted records into the buffer provided by the caller when
Vakul Garg692d7b52019-01-16 10:40:16 +00001602 * zero copy is not true. Further, records are removed from the rx_list
 1603 * if it is not a peek case and the record has been consumed completely.
 1604 */
1605static int process_rx_list(struct tls_sw_context_rx *ctx,
1606 struct msghdr *msg,
Vakul Garg2b794c42019-02-23 08:42:37 +00001607 u8 *control,
1608 bool *cmsg,
Vakul Garg692d7b52019-01-16 10:40:16 +00001609 size_t skip,
1610 size_t len,
1611 bool zc,
1612 bool is_peek)
1613{
1614 struct sk_buff *skb = skb_peek(&ctx->rx_list);
Vakul Garg2b794c42019-02-23 08:42:37 +00001615 u8 ctrl = *control;
1616 u8 msgc = *cmsg;
1617 struct tls_msg *tlm;
Vakul Garg692d7b52019-01-16 10:40:16 +00001618 ssize_t copied = 0;
1619
Vakul Garg2b794c42019-02-23 08:42:37 +00001620 /* Set the record type in 'control' if caller didn't pass it */
1621 if (!ctrl && skb) {
1622 tlm = tls_msg(skb);
1623 ctrl = tlm->control;
1624 }
1625
Vakul Garg692d7b52019-01-16 10:40:16 +00001626 while (skip && skb) {
1627 struct strp_msg *rxm = strp_msg(skb);
Vakul Garg2b794c42019-02-23 08:42:37 +00001628 tlm = tls_msg(skb);
1629
1630 /* Cannot process a record of different type */
1631 if (ctrl != tlm->control)
1632 return 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001633
1634 if (skip < rxm->full_len)
1635 break;
1636
1637 skip = skip - rxm->full_len;
1638 skb = skb_peek_next(skb, &ctx->rx_list);
1639 }
1640
1641 while (len && skb) {
1642 struct sk_buff *next_skb;
1643 struct strp_msg *rxm = strp_msg(skb);
1644 int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1645
Vakul Garg2b794c42019-02-23 08:42:37 +00001646 tlm = tls_msg(skb);
1647
1648 /* Cannot process a record of different type */
1649 if (ctrl != tlm->control)
1650 return 0;
1651
1652 /* Set record type if not already done. For a non-data record,
1653 * do not proceed if record type could not be copied.
1654 */
1655 if (!msgc) {
1656 int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1657 sizeof(ctrl), &ctrl);
1658 msgc = true;
1659 if (ctrl != TLS_RECORD_TYPE_DATA) {
1660 if (cerr || msg->msg_flags & MSG_CTRUNC)
1661 return -EIO;
1662
1663 *cmsg = msgc;
1664 }
1665 }
1666
Vakul Garg692d7b52019-01-16 10:40:16 +00001667 if (!zc || (rxm->full_len - skip) > len) {
1668 int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1669 msg, chunk);
1670 if (err < 0)
1671 return err;
1672 }
1673
1674 len = len - chunk;
1675 copied = copied + chunk;
1676
 1677		/* Consume the data from the record in the non-peek case */
1678 if (!is_peek) {
1679 rxm->offset = rxm->offset + chunk;
1680 rxm->full_len = rxm->full_len - chunk;
1681
1682 /* Return if there is unconsumed data in the record */
1683 if (rxm->full_len - skip)
1684 break;
1685 }
1686
 1687		/* The remaining skip-bytes must lie in the 1st record in rx_list,
 1688		 * so from the 2nd record onward 'skip' should be 0.
 1689		 */
1690 skip = 0;
1691
1692 if (msg)
1693 msg->msg_flags |= MSG_EOR;
1694
1695 next_skb = skb_peek_next(skb, &ctx->rx_list);
1696
1697 if (!is_peek) {
1698 skb_unlink(skb, &ctx->rx_list);
Vakul Garga88c26f2019-03-21 11:59:57 +00001699 consume_skb(skb);
Vakul Garg692d7b52019-01-16 10:40:16 +00001700 }
1701
1702 skb = next_skb;
1703 }
1704
Vakul Garg2b794c42019-02-23 08:42:37 +00001705 *control = ctrl;
Vakul Garg692d7b52019-01-16 10:40:16 +00001706 return copied;
1707}
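
/* How the peek handling above looks from userspace: a minimal sketch,
 * assuming `tls_fd` is a kTLS socket with TLS_RX enabled (an assumption, not
 * shown here). A MSG_PEEK read leaves the decrypted record queued on
 * rx_list, so an immediately following plain read returns the same
 * plaintext again.
 */
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>

static int peek_then_read(int tls_fd)
{
	char peeked[256], consumed[256];
	ssize_t n1, n2;

	n1 = recv(tls_fd, peeked, sizeof(peeked), MSG_PEEK);
	if (n1 <= 0)
		return -1;

	n2 = recv(tls_fd, consumed, n1, 0);
	if (n2 != n1)
		return -1;

	/* Both reads observe the same decrypted bytes. */
	return memcmp(peeked, consumed, n1) ? -1 : 0;
}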
1708
Dave Watsonc46234e2018-03-22 10:10:35 -07001709int tls_sw_recvmsg(struct sock *sk,
1710 struct msghdr *msg,
1711 size_t len,
1712 int nonblock,
1713 int flags,
1714 int *addr_len)
1715{
1716 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001717 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001718 struct tls_prot_info *prot = &tls_ctx->prot_info;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001719 struct sk_psock *psock;
Vakul Garg692d7b52019-01-16 10:40:16 +00001720 unsigned char control = 0;
1721 ssize_t decrypted = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001722 struct strp_msg *rxm;
Vakul Garg2b794c42019-02-23 08:42:37 +00001723 struct tls_msg *tlm;
Dave Watsonc46234e2018-03-22 10:10:35 -07001724 struct sk_buff *skb;
1725 ssize_t copied = 0;
1726 bool cmsg = false;
Daniel Borkmann06030db2018-06-15 03:07:46 +02001727 int target, err = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001728 long timeo;
David Howells00e23702018-10-22 13:07:28 +01001729 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
Vakul Garg692d7b52019-01-16 10:40:16 +00001730 bool is_peek = flags & MSG_PEEK;
Vakul Garg94524d82018-08-29 15:26:55 +05301731 int num_async = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001732
1733 flags |= nonblock;
1734
1735 if (unlikely(flags & MSG_ERRQUEUE))
1736 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1737
John Fastabendd3b18ad32018-10-13 02:46:01 +02001738 psock = sk_psock_get(sk);
Dave Watsonc46234e2018-03-22 10:10:35 -07001739 lock_sock(sk);
1740
Vakul Garg692d7b52019-01-16 10:40:16 +00001741	/* Process pending decrypted records; these are copied, never zero-copy */
Vakul Garg2b794c42019-02-23 08:42:37 +00001742 err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
1743 is_peek);
Vakul Garg692d7b52019-01-16 10:40:16 +00001744 if (err < 0) {
1745 tls_err_abort(sk, err);
1746 goto end;
1747 } else {
1748 copied = err;
1749 }
1750
Jakub Kicinski46a16952019-05-24 10:34:30 -07001751 if (len <= copied)
Vakul Garg692d7b52019-01-16 10:40:16 +00001752 goto recv_end;
Jakub Kicinski46a16952019-05-24 10:34:30 -07001753
1754 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1755 len = len - copied;
1756 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
Vakul Garg692d7b52019-01-16 10:40:16 +00001757
Jakub Kicinski04b25a52019-05-24 10:34:32 -07001758 while (len && (decrypted + copied < target || ctx->recv_pkt)) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001759 bool retain_skb = false;
Vakul Garg692d7b52019-01-16 10:40:16 +00001760 bool zc = false;
1761 int to_decrypt;
Dave Watsonc46234e2018-03-22 10:10:35 -07001762 int chunk = 0;
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001763 bool async_capable;
1764 bool async = false;
Dave Watsonc46234e2018-03-22 10:10:35 -07001765
John Fastabendd3b18ad32018-10-13 02:46:01 +02001766 skb = tls_wait_data(sk, psock, flags, timeo, &err);
1767 if (!skb) {
1768 if (psock) {
John Fastabend02c558b2018-10-16 11:08:04 -07001769 int ret = __tcp_bpf_recvmsg(sk, psock,
1770 msg, len, flags);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001771
1772 if (ret > 0) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001773 decrypted += ret;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001774 len -= ret;
1775 continue;
1776 }
1777 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001778 goto recv_end;
Vakul Garg2b794c42019-02-23 08:42:37 +00001779 } else {
1780 tlm = tls_msg(skb);
1781 if (prot->version == TLS_1_3_VERSION)
1782 tlm->control = 0;
1783 else
1784 tlm->control = ctx->control;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001785 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001786
1787 rxm = strp_msg(skb);
Vakul Garg94524d82018-08-29 15:26:55 +05301788
Vakul Garg4509de12019-02-14 07:11:35 +00001789 to_decrypt = rxm->full_len - prot->overhead_size;
Dave Watsonfedf2012019-01-30 21:58:24 +00001790
1791 if (to_decrypt <= len && !is_kvec && !is_peek &&
Dave Watson130b3922019-01-30 21:58:31 +00001792 ctx->control == TLS_RECORD_TYPE_DATA &&
Vakul Garg4509de12019-02-14 07:11:35 +00001793 prot->version != TLS_1_3_VERSION)
Dave Watsonfedf2012019-01-30 21:58:24 +00001794 zc = true;
1795
Vakul Gargc0ab4732019-02-11 11:31:05 +00001796 /* Do not use async mode if record is non-data */
1797 if (ctx->control == TLS_RECORD_TYPE_DATA)
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001798 async_capable = ctx->async_capable;
Vakul Gargc0ab4732019-02-11 11:31:05 +00001799 else
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001800 async_capable = false;
Vakul Gargc0ab4732019-02-11 11:31:05 +00001801
Dave Watsonfedf2012019-01-30 21:58:24 +00001802 err = decrypt_skb_update(sk, skb, &msg->msg_iter,
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001803 &chunk, &zc, async_capable);
Dave Watsonfedf2012019-01-30 21:58:24 +00001804 if (err < 0 && err != -EINPROGRESS) {
1805 tls_err_abort(sk, EBADMSG);
1806 goto recv_end;
1807 }
1808
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001809 if (err == -EINPROGRESS) {
1810 async = true;
Dave Watsonfedf2012019-01-30 21:58:24 +00001811 num_async++;
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001812 } else if (prot->version == TLS_1_3_VERSION) {
Vakul Garg2b794c42019-02-23 08:42:37 +00001813 tlm->control = ctx->control;
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001814 }
Vakul Garg2b794c42019-02-23 08:42:37 +00001815
 1816		/* If the type of records being processed is not known yet,
 1817		 * set it to the type of the record just dequeued. If it is
 1818		 * already known but does not match the record just dequeued,
 1819		 * go to the end. We always have the record type here: for
 1820		 * TLS 1.2 it is known as soon as the record is dequeued from
 1821		 * the stream parser, and for TLS 1.3 async mode is disabled.
 1822		 */
1823
1824 if (!control)
1825 control = tlm->control;
1826 else if (control != tlm->control)
1827 goto recv_end;
Dave Watsonfedf2012019-01-30 21:58:24 +00001828
Dave Watsonc46234e2018-03-22 10:10:35 -07001829 if (!cmsg) {
1830 int cerr;
1831
1832 cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
Vakul Garg2b794c42019-02-23 08:42:37 +00001833 sizeof(control), &control);
Dave Watsonc46234e2018-03-22 10:10:35 -07001834 cmsg = true;
Vakul Garg2b794c42019-02-23 08:42:37 +00001835 if (control != TLS_RECORD_TYPE_DATA) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001836 if (cerr || msg->msg_flags & MSG_CTRUNC) {
1837 err = -EIO;
1838 goto recv_end;
1839 }
1840 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001841 }
1842
Vakul Gargc0ab4732019-02-11 11:31:05 +00001843 if (async)
1844 goto pick_next_record;
1845
Dave Watsonfedf2012019-01-30 21:58:24 +00001846 if (!zc) {
1847 if (rxm->full_len > len) {
1848 retain_skb = true;
1849 chunk = len;
1850 } else {
1851 chunk = rxm->full_len;
1852 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001853
Dave Watsonfedf2012019-01-30 21:58:24 +00001854 err = skb_copy_datagram_msg(skb, rxm->offset,
1855 msg, chunk);
1856 if (err < 0)
1857 goto recv_end;
Dave Watsonc46234e2018-03-22 10:10:35 -07001858
Dave Watsonfedf2012019-01-30 21:58:24 +00001859 if (!is_peek) {
1860 rxm->offset = rxm->offset + chunk;
1861 rxm->full_len = rxm->full_len - chunk;
Vakul Garg692d7b52019-01-16 10:40:16 +00001862 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001863 }
1864
Vakul Garg94524d82018-08-29 15:26:55 +05301865pick_next_record:
Vakul Garg692d7b52019-01-16 10:40:16 +00001866 if (chunk > len)
1867 chunk = len;
1868
1869 decrypted += chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001870 len -= chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001871
Vakul Garg692d7b52019-01-16 10:40:16 +00001872		/* Queue the skb when it must be kept: async, peek, or partially consumed */
1873 if (async || is_peek || retain_skb) {
1874 skb_queue_tail(&ctx->rx_list, skb);
1875 skb = NULL;
1876 }
Vakul Garg94524d82018-08-29 15:26:55 +05301877
Vakul Garg692d7b52019-01-16 10:40:16 +00001878 if (tls_sw_advance_skb(sk, skb, chunk)) {
1879 /* Return full control message to
1880 * userspace before trying to parse
1881 * another message type
Daniel Borkmann50c6b582018-09-14 23:00:55 +02001882 */
Vakul Garg692d7b52019-01-16 10:40:16 +00001883 msg->msg_flags |= MSG_EOR;
1884 if (ctx->control != TLS_RECORD_TYPE_DATA)
1885 goto recv_end;
1886 } else {
Daniel Borkmann50c6b582018-09-14 23:00:55 +02001887 break;
Dave Watsonc46234e2018-03-22 10:10:35 -07001888 }
Jakub Kicinski04b25a52019-05-24 10:34:32 -07001889 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001890
1891recv_end:
Vakul Garg94524d82018-08-29 15:26:55 +05301892 if (num_async) {
1893 /* Wait for all previously submitted records to be decrypted */
1894 smp_store_mb(ctx->async_notify, true);
1895 if (atomic_read(&ctx->decrypt_pending)) {
1896 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1897 if (err) {
 1898				/* one of the async decrypt requests failed */
1899 tls_err_abort(sk, err);
1900 copied = 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001901 decrypted = 0;
1902 goto end;
Vakul Garg94524d82018-08-29 15:26:55 +05301903 }
1904 } else {
1905 reinit_completion(&ctx->async_wait.completion);
1906 }
1907 WRITE_ONCE(ctx->async_notify, false);
Vakul Garg692d7b52019-01-16 10:40:16 +00001908
1909 /* Drain records from the rx_list & copy if required */
1910 if (is_peek || is_kvec)
Vakul Garg2b794c42019-02-23 08:42:37 +00001911 err = process_rx_list(ctx, msg, &control, &cmsg, copied,
Vakul Garg692d7b52019-01-16 10:40:16 +00001912 decrypted, false, is_peek);
1913 else
Vakul Garg2b794c42019-02-23 08:42:37 +00001914 err = process_rx_list(ctx, msg, &control, &cmsg, 0,
Vakul Garg692d7b52019-01-16 10:40:16 +00001915 decrypted, true, is_peek);
1916 if (err < 0) {
1917 tls_err_abort(sk, err);
1918 copied = 0;
1919 goto end;
1920 }
Vakul Garg94524d82018-08-29 15:26:55 +05301921 }
1922
Vakul Garg692d7b52019-01-16 10:40:16 +00001923 copied += decrypted;
1924
1925end:
Dave Watsonc46234e2018-03-22 10:10:35 -07001926 release_sock(sk);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001927 if (psock)
1928 sk_psock_put(sk, psock);
Dave Watsonc46234e2018-03-22 10:10:35 -07001929 return copied ? : err;
1930}
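
/* Userspace counterpart of the put_cmsg() call in tls_sw_recvmsg(): a
 * minimal sketch that reads one record and recovers its TLS record type
 * from the SOL_TLS/TLS_GET_RECORD_TYPE control message, along the lines of
 * the example in Documentation/networking/tls.rst. The function and
 * variable names here are illustrative.
 */
#include <linux/tls.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/socket.h>

#ifndef SOL_TLS
#define SOL_TLS 282	/* from include/linux/socket.h */
#endif

static ssize_t recv_with_record_type(int tls_fd, void *buf, size_t len,
				     unsigned char *record_type)
{
	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	ssize_t n;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	n = recvmsg(tls_fd, &msg, 0);
	if (n < 0)
		return n;

	*record_type = 23;	/* application_data unless a cmsg says otherwise */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_TLS &&
		    cmsg->cmsg_type == TLS_GET_RECORD_TYPE) {
			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			break;
		}
	}
	return n;
}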
1931
1932ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
1933 struct pipe_inode_info *pipe,
1934 size_t len, unsigned int flags)
1935{
1936 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001937 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001938 struct strp_msg *rxm = NULL;
1939 struct sock *sk = sock->sk;
1940 struct sk_buff *skb;
1941 ssize_t copied = 0;
1942 int err = 0;
1943 long timeo;
1944 int chunk;
Vakul Garg0b243d02018-08-10 20:46:41 +05301945 bool zc = false;
Dave Watsonc46234e2018-03-22 10:10:35 -07001946
1947 lock_sock(sk);
1948
1949 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1950
John Fastabendd3b18ad32018-10-13 02:46:01 +02001951 skb = tls_wait_data(sk, NULL, flags, timeo, &err);
Dave Watsonc46234e2018-03-22 10:10:35 -07001952 if (!skb)
1953 goto splice_read_end;
1954
Dave Watsonc46234e2018-03-22 10:10:35 -07001955 if (!ctx->decrypted) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001956 err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
Dave Watsonc46234e2018-03-22 10:10:35 -07001957
Dave Watsonfedf2012019-01-30 21:58:24 +00001958 /* splice does not support reading control messages */
1959 if (ctx->control != TLS_RECORD_TYPE_DATA) {
Valentin Vidic4a5cdc62019-12-05 07:41:18 +01001960 err = -EINVAL;
Dave Watsonfedf2012019-01-30 21:58:24 +00001961 goto splice_read_end;
1962 }
1963
Dave Watsonc46234e2018-03-22 10:10:35 -07001964 if (err < 0) {
1965 tls_err_abort(sk, EBADMSG);
1966 goto splice_read_end;
1967 }
Jakub Kicinskibc76e5b2019-10-06 21:09:32 -07001968 ctx->decrypted = 1;
Dave Watsonc46234e2018-03-22 10:10:35 -07001969 }
1970 rxm = strp_msg(skb);
1971
1972 chunk = min_t(unsigned int, rxm->full_len, len);
1973 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
1974 if (copied < 0)
1975 goto splice_read_end;
1976
1977 if (likely(!(flags & MSG_PEEK)))
1978 tls_sw_advance_skb(sk, skb, copied);
1979
1980splice_read_end:
1981 release_sock(sk);
1982 return copied ? : err;
1983}
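
/* The splice path above, driven from userspace: a minimal sketch that moves
 * decrypted application data from a kTLS socket into a pipe. Since the code
 * above returns -EINVAL for control records, this only succeeds while
 * application_data records arrive. Names are illustrative.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t splice_tls_to_pipe(int tls_fd, int pipe_wr_fd, size_t len)
{
	return splice(tls_fd, NULL, pipe_wr_fd, NULL, len, SPLICE_F_MOVE);
}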
1984
John Fastabend924ad652018-10-13 02:46:00 +02001985bool tls_sw_stream_read(const struct sock *sk)
Dave Watsonc46234e2018-03-22 10:10:35 -07001986{
Dave Watsonc46234e2018-03-22 10:10:35 -07001987 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001988 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001989 bool ingress_empty = true;
1990 struct sk_psock *psock;
Dave Watsonc46234e2018-03-22 10:10:35 -07001991
John Fastabendd3b18ad32018-10-13 02:46:01 +02001992 rcu_read_lock();
1993 psock = sk_psock(sk);
1994 if (psock)
1995 ingress_empty = list_empty(&psock->ingress_msg);
1996 rcu_read_unlock();
Dave Watsonc46234e2018-03-22 10:10:35 -07001997
Jakub Kicinski13aecb12019-07-04 14:50:36 -07001998 return !ingress_empty || ctx->recv_pkt ||
1999 !skb_queue_empty(&ctx->rx_list);
Dave Watsonc46234e2018-03-22 10:10:35 -07002000}
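
/* tls_sw_stream_read() backs readability polling, so from userspace POLLIN
 * is reported once a full record has been parsed (recv_pkt) or data sits on
 * rx_list or the psock ingress queue. A minimal sketch:
 */
#include <poll.h>

static int wait_tls_readable(int tls_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = tls_fd, .events = POLLIN };

	return poll(&pfd, 1, timeout_ms);	/* > 0 when a read will make progress */
}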
2001
2002static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
2003{
2004 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002005 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00002006 struct tls_prot_info *prot = &tls_ctx->prot_info;
Kees Cook3463e512018-06-25 16:55:05 -07002007 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
Dave Watsonc46234e2018-03-22 10:10:35 -07002008 struct strp_msg *rxm = strp_msg(skb);
2009 size_t cipher_overhead;
2010 size_t data_len = 0;
2011 int ret;
2012
2013 /* Verify that we have a full TLS header, or wait for more data */
Vakul Garg4509de12019-02-14 07:11:35 +00002014 if (rxm->offset + prot->prepend_size > skb->len)
Dave Watsonc46234e2018-03-22 10:10:35 -07002015 return 0;
2016
Kees Cook3463e512018-06-25 16:55:05 -07002017 /* Sanity-check size of on-stack buffer. */
Vakul Garg4509de12019-02-14 07:11:35 +00002018 if (WARN_ON(prot->prepend_size > sizeof(header))) {
Kees Cook3463e512018-06-25 16:55:05 -07002019 ret = -EINVAL;
2020 goto read_failure;
2021 }
2022
Dave Watsonc46234e2018-03-22 10:10:35 -07002023 /* Linearize header to local buffer */
Vakul Garg4509de12019-02-14 07:11:35 +00002024 ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
Dave Watsonc46234e2018-03-22 10:10:35 -07002025
2026 if (ret < 0)
2027 goto read_failure;
2028
2029 ctx->control = header[0];
2030
2031 data_len = ((header[4] & 0xFF) | (header[3] << 8));
2032
Vakul Garg4509de12019-02-14 07:11:35 +00002033 cipher_overhead = prot->tag_size;
2034 if (prot->version != TLS_1_3_VERSION)
2035 cipher_overhead += prot->iv_size;
Dave Watsonc46234e2018-03-22 10:10:35 -07002036
Dave Watson130b3922019-01-30 21:58:31 +00002037 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
Vakul Garg4509de12019-02-14 07:11:35 +00002038 prot->tail_size) {
Dave Watsonc46234e2018-03-22 10:10:35 -07002039 ret = -EMSGSIZE;
2040 goto read_failure;
2041 }
2042 if (data_len < cipher_overhead) {
2043 ret = -EBADMSG;
2044 goto read_failure;
2045 }
2046
Dave Watson130b3922019-01-30 21:58:31 +00002047 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2048 if (header[1] != TLS_1_2_VERSION_MINOR ||
2049 header[2] != TLS_1_2_VERSION_MAJOR) {
Dave Watsonc46234e2018-03-22 10:10:35 -07002050 ret = -EINVAL;
2051 goto read_failure;
2052 }
Jakub Kicinskibe2fbc12019-09-02 21:31:05 -07002053
Jakub Kicinskif953d33b2019-06-10 21:40:02 -07002054 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
Jakub Kicinskife58a5a2019-06-10 21:40:01 -07002055 TCP_SKB_CB(skb)->seq + rxm->offset);
Dave Watsonc46234e2018-03-22 10:10:35 -07002056 return data_len + TLS_HEADER_SIZE;
2057
2058read_failure:
2059 tls_err_abort(strp->sk, ret);
2060
2061 return ret;
2062}
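
/* The 5-byte record header checked in tls_read_size(), restated as a
 * standalone sketch: byte 0 is the content type, bytes 1-2 the legacy
 * version (0x0303 on the wire for both TLS 1.2 and TLS 1.3), and bytes 3-4
 * the big-endian ciphertext length, exactly as computed by data_len above.
 */
#include <stdint.h>

struct tls_record_hdr {
	uint8_t type;
	uint16_t version;
	uint16_t length;
};

static struct tls_record_hdr parse_tls_record_hdr(const uint8_t h[5])
{
	struct tls_record_hdr hdr = {
		.type = h[0],
		.version = (uint16_t)((h[1] << 8) | h[2]),
		.length = (uint16_t)((h[3] << 8) | h[4]),
	};

	return hdr;
}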
2063
2064static void tls_queue(struct strparser *strp, struct sk_buff *skb)
2065{
2066 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002067 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07002068
Jakub Kicinskibc76e5b2019-10-06 21:09:32 -07002069 ctx->decrypted = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07002070
2071 ctx->recv_pkt = skb;
2072 strp_pause(strp);
2073
Vakul Gargad13acc2018-07-30 16:08:33 +05302074 ctx->saved_data_ready(strp->sk);
Dave Watsonc46234e2018-03-22 10:10:35 -07002075}
2076
2077static void tls_data_ready(struct sock *sk)
2078{
2079 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002080 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002081 struct sk_psock *psock;
Dave Watsonc46234e2018-03-22 10:10:35 -07002082
2083 strp_data_ready(&ctx->strp);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002084
2085 psock = sk_psock_get(sk);
2086 if (psock && !list_empty(&psock->ingress_msg)) {
2087 ctx->saved_data_ready(sk);
2088 sk_psock_put(sk, psock);
2089 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002090}
2091
John Fastabendf87e62d2019-07-19 10:29:16 -07002092void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2093{
2094 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2095
2096 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2097 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2098 cancel_delayed_work_sync(&ctx->tx_work.work);
2099}
2100
John Fastabend313ab002019-07-19 10:29:17 -07002101void tls_sw_release_resources_tx(struct sock *sk)
Dave Watson3c4d7552017-06-14 11:37:39 -07002102{
2103 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002104 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +05302105 struct tls_rec *rec, *tmp;
2106
2107 /* Wait for any pending async encryptions to complete */
2108 smp_store_mb(ctx->async_notify, true);
2109 if (atomic_read(&ctx->encrypt_pending))
2110 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2111
Vakul Garga42055e2018-09-21 09:46:13 +05302112 tls_tx_records(sk, -1);
2113
Vakul Garg9932a292018-09-24 15:35:56 +05302114	/* Free up unsent records in tx_list. First, free
Vakul Garga42055e2018-09-21 09:46:13 +05302115	 * the partially sent record, if any, at the head of tx_list.
 2116	 */
Jakub Kicinskic5daa6c2019-11-27 12:16:44 -08002117 if (tls_ctx->partially_sent_record) {
2118 tls_free_partial_record(sk, tls_ctx);
Vakul Garg9932a292018-09-24 15:35:56 +05302119 rec = list_first_entry(&ctx->tx_list,
Vakul Garga42055e2018-09-21 09:46:13 +05302120 struct tls_rec, list);
2121 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02002122 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +05302123 kfree(rec);
2124 }
2125
Vakul Garg9932a292018-09-24 15:35:56 +05302126 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
Vakul Garga42055e2018-09-21 09:46:13 +05302127 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02002128 sk_msg_free(sk, &rec->msg_encrypted);
2129 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +05302130 kfree(rec);
2131 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002132
Vakul Garg201876b2018-07-24 16:54:27 +05302133 crypto_free_aead(ctx->aead_send);
Vakul Gargc7749732018-09-25 20:21:51 +05302134 tls_free_open_rec(sk);
John Fastabend313ab002019-07-19 10:29:17 -07002135}
2136
2137void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2138{
2139 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002140
2141 kfree(ctx);
2142}
2143
Boris Pismenny39f56e12018-07-13 14:33:41 +03002144void tls_sw_release_resources_rx(struct sock *sk)
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002145{
2146 struct tls_context *tls_ctx = tls_get_ctx(sk);
2147 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2148
Jakub Kicinski12c76862019-04-19 16:52:19 -07002149 kfree(tls_ctx->rx.rec_seq);
2150 kfree(tls_ctx->rx.iv);
2151
Dave Watsonc46234e2018-03-22 10:10:35 -07002152 if (ctx->aead_recv) {
Vakul Garg201876b2018-07-24 16:54:27 +05302153 kfree_skb(ctx->recv_pkt);
2154 ctx->recv_pkt = NULL;
Vakul Garg692d7b52019-01-16 10:40:16 +00002155 skb_queue_purge(&ctx->rx_list);
Dave Watsonc46234e2018-03-22 10:10:35 -07002156 crypto_free_aead(ctx->aead_recv);
2157 strp_stop(&ctx->strp);
John Fastabend313ab002019-07-19 10:29:17 -07002158 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2159 * we still want to strp_stop(), but sk->sk_data_ready was
2160 * never swapped.
2161 */
2162 if (ctx->saved_data_ready) {
2163 write_lock_bh(&sk->sk_callback_lock);
2164 sk->sk_data_ready = ctx->saved_data_ready;
2165 write_unlock_bh(&sk->sk_callback_lock);
2166 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002167 }
Boris Pismenny39f56e12018-07-13 14:33:41 +03002168}
2169
John Fastabend313ab002019-07-19 10:29:17 -07002170void tls_sw_strparser_done(struct tls_context *tls_ctx)
2171{
2172 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2173
2174 strp_done(&ctx->strp);
2175}
2176
2177void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2178{
2179 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2180
2181 kfree(ctx);
2182}
2183
Boris Pismenny39f56e12018-07-13 14:33:41 +03002184void tls_sw_free_resources_rx(struct sock *sk)
2185{
2186 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismenny39f56e12018-07-13 14:33:41 +03002187
2188 tls_sw_release_resources_rx(sk);
John Fastabend313ab002019-07-19 10:29:17 -07002189 tls_sw_free_ctx_rx(tls_ctx);
Dave Watson3c4d7552017-06-14 11:37:39 -07002190}
2191
Vakul Garg9932a292018-09-24 15:35:56 +05302192/* The work handler to transmit the encrypted records in tx_list */
Vakul Garga42055e2018-09-21 09:46:13 +05302193static void tx_work_handler(struct work_struct *work)
2194{
2195 struct delayed_work *delayed_work = to_delayed_work(work);
2196 struct tx_work *tx_work = container_of(delayed_work,
2197 struct tx_work, work);
2198 struct sock *sk = tx_work->sk;
2199 struct tls_context *tls_ctx = tls_get_ctx(sk);
John Fastabendf87e62d2019-07-19 10:29:16 -07002200 struct tls_sw_context_tx *ctx;
2201
2202 if (unlikely(!tls_ctx))
2203 return;
2204
2205 ctx = tls_sw_ctx_tx(tls_ctx);
2206 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2207 return;
Vakul Garga42055e2018-09-21 09:46:13 +05302208
2209 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2210 return;
Jakub Kicinski79ffe602019-11-05 14:24:35 -08002211 mutex_lock(&tls_ctx->tx_lock);
Vakul Garga42055e2018-09-21 09:46:13 +05302212 lock_sock(sk);
2213 tls_tx_records(sk, -1);
2214 release_sock(sk);
Jakub Kicinski79ffe602019-11-05 14:24:35 -08002215 mutex_unlock(&tls_ctx->tx_lock);
Vakul Garga42055e2018-09-21 09:46:13 +05302216}
2217
Boris Pismenny7463d3a2019-02-27 17:38:04 +02002218void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2219{
2220 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2221
2222 /* Schedule the transmission if tx list is ready */
Jakub Kicinski02b1fa02019-11-05 14:24:34 -08002223 if (is_tx_ready(tx_ctx) &&
2224 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2225 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
Boris Pismenny7463d3a2019-02-27 17:38:04 +02002226}
2227
Jakub Kicinski318892a2019-07-19 10:29:14 -07002228void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2229{
2230 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2231
2232 write_lock_bh(&sk->sk_callback_lock);
2233 rx_ctx->saved_data_ready = sk->sk_data_ready;
2234 sk->sk_data_ready = tls_data_ready;
2235 write_unlock_bh(&sk->sk_callback_lock);
2236
2237 strp_check_rcv(&rx_ctx->strp);
2238}
2239
Dave Watsonc46234e2018-03-22 10:10:35 -07002240int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
Dave Watson3c4d7552017-06-14 11:37:39 -07002241{
Vakul Garg4509de12019-02-14 07:11:35 +00002242 struct tls_context *tls_ctx = tls_get_ctx(sk);
2243 struct tls_prot_info *prot = &tls_ctx->prot_info;
Dave Watson3c4d7552017-06-14 11:37:39 -07002244 struct tls_crypto_info *crypto_info;
2245 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002246 struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002247 struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002248 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2249 struct tls_sw_context_rx *sw_ctx_rx = NULL;
Dave Watsonc46234e2018-03-22 10:10:35 -07002250 struct cipher_context *cctx;
2251 struct crypto_aead **aead;
2252 struct strp_callbacks cb;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002253 u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
Vakul Garg692d7b52019-01-16 10:40:16 +00002254 struct crypto_tfm *tfm;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002255 char *iv, *rec_seq, *key, *salt, *cipher_name;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002256 size_t keysize;
Dave Watson3c4d7552017-06-14 11:37:39 -07002257 int rc = 0;
2258
2259 if (!ctx) {
2260 rc = -EINVAL;
2261 goto out;
2262 }
2263
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002264 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03002265 if (!ctx->priv_ctx_tx) {
2266 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2267 if (!sw_ctx_tx) {
2268 rc = -ENOMEM;
2269 goto out;
2270 }
2271 ctx->priv_ctx_tx = sw_ctx_tx;
2272 } else {
2273 sw_ctx_tx =
2274 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
Dave Watsonc46234e2018-03-22 10:10:35 -07002275 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002276 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03002277 if (!ctx->priv_ctx_rx) {
2278 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2279 if (!sw_ctx_rx) {
2280 rc = -ENOMEM;
2281 goto out;
2282 }
2283 ctx->priv_ctx_rx = sw_ctx_rx;
2284 } else {
2285 sw_ctx_rx =
2286 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002287 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002288 }
2289
Dave Watsonc46234e2018-03-22 10:10:35 -07002290 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03002291 crypto_init_wait(&sw_ctx_tx->async_wait);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02002292 crypto_info = &ctx->crypto_send.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07002293 cctx = &ctx->tx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002294 aead = &sw_ctx_tx->aead_send;
Vakul Garg9932a292018-09-24 15:35:56 +05302295 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
Vakul Garga42055e2018-09-21 09:46:13 +05302296 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2297 sw_ctx_tx->tx_work.sk = sk;
Dave Watsonc46234e2018-03-22 10:10:35 -07002298 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03002299 crypto_init_wait(&sw_ctx_rx->async_wait);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02002300 crypto_info = &ctx->crypto_recv.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07002301 cctx = &ctx->rx;
Vakul Garg692d7b52019-01-16 10:40:16 +00002302 skb_queue_head_init(&sw_ctx_rx->rx_list);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002303 aead = &sw_ctx_rx->aead_recv;
Dave Watsonc46234e2018-03-22 10:10:35 -07002304 }
2305
Dave Watson3c4d7552017-06-14 11:37:39 -07002306 switch (crypto_info->cipher_type) {
2307 case TLS_CIPHER_AES_GCM_128: {
2308 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2309 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2310 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2311 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
2312 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2313 rec_seq =
2314 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
2315 gcm_128_info =
2316 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002317 keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2318 key = gcm_128_info->key;
2319 salt = gcm_128_info->salt;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002320 salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2321 cipher_name = "gcm(aes)";
Dave Watsonfb99bce2019-01-30 21:58:05 +00002322 break;
2323 }
2324 case TLS_CIPHER_AES_GCM_256: {
2325 nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2326 tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2327 iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2328 iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
2329 rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2330 rec_seq =
2331 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
2332 gcm_256_info =
2333 (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
2334 keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2335 key = gcm_256_info->key;
2336 salt = gcm_256_info->salt;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002337 salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2338 cipher_name = "gcm(aes)";
2339 break;
2340 }
2341 case TLS_CIPHER_AES_CCM_128: {
2342 nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2343 tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2344 iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2345 iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
2346 rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2347 rec_seq =
2348 ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
2349 ccm_128_info =
2350 (struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
2351 keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2352 key = ccm_128_info->key;
2353 salt = ccm_128_info->salt;
2354 salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2355 cipher_name = "ccm(aes)";
Dave Watson3c4d7552017-06-14 11:37:39 -07002356 break;
2357 }
2358 default:
2359 rc = -EINVAL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002360 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07002361 }
2362
Jakub Kicinski89fec472019-06-10 21:40:00 -07002363 /* Sanity-check the sizes for stack allocations. */
2364 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2365 rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
Kees Cookb16520f2018-04-10 17:52:34 -07002366 rc = -EINVAL;
2367 goto free_priv;
2368 }
2369
Dave Watson130b3922019-01-30 21:58:31 +00002370 if (crypto_info->version == TLS_1_3_VERSION) {
2371 nonce_size = 0;
Vakul Garg4509de12019-02-14 07:11:35 +00002372 prot->aad_size = TLS_HEADER_SIZE;
2373 prot->tail_size = 1;
Dave Watson130b3922019-01-30 21:58:31 +00002374 } else {
Vakul Garg4509de12019-02-14 07:11:35 +00002375 prot->aad_size = TLS_AAD_SPACE_SIZE;
2376 prot->tail_size = 0;
Dave Watson130b3922019-01-30 21:58:31 +00002377 }
2378
Vakul Garg4509de12019-02-14 07:11:35 +00002379 prot->version = crypto_info->version;
2380 prot->cipher_type = crypto_info->cipher_type;
2381 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2382 prot->tag_size = tag_size;
2383 prot->overhead_size = prot->prepend_size +
2384 prot->tag_size + prot->tail_size;
2385 prot->iv_size = iv_size;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002386 prot->salt_size = salt_size;
2387 cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
Dave Watsonc46234e2018-03-22 10:10:35 -07002388 if (!cctx->iv) {
Dave Watson3c4d7552017-06-14 11:37:39 -07002389 rc = -ENOMEM;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002390 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07002391 }
Dave Watsonfb99bce2019-01-30 21:58:05 +00002392 /* Note: 128 & 256 bit salt are the same size */
Vakul Garg4509de12019-02-14 07:11:35 +00002393 prot->rec_seq_size = rec_seq_size;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002394 memcpy(cctx->iv, salt, salt_size);
2395 memcpy(cctx->iv + salt_size, iv, iv_size);
zhong jiang969d5092018-08-01 00:50:24 +08002396 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
Dave Watsonc46234e2018-03-22 10:10:35 -07002397 if (!cctx->rec_seq) {
Dave Watson3c4d7552017-06-14 11:37:39 -07002398 rc = -ENOMEM;
2399 goto free_iv;
2400 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002401
Dave Watsonc46234e2018-03-22 10:10:35 -07002402 if (!*aead) {
Vakul Gargf295b3a2019-03-20 02:03:36 +00002403 *aead = crypto_alloc_aead(cipher_name, 0, 0);
Dave Watsonc46234e2018-03-22 10:10:35 -07002404 if (IS_ERR(*aead)) {
2405 rc = PTR_ERR(*aead);
2406 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002407 goto free_rec_seq;
2408 }
2409 }
2410
2411 ctx->push_pending_record = tls_sw_push_pending_record;
2412
Dave Watsonfb99bce2019-01-30 21:58:05 +00002413 rc = crypto_aead_setkey(*aead, key, keysize);
2414
Dave Watson3c4d7552017-06-14 11:37:39 -07002415 if (rc)
2416 goto free_aead;
2417
Vakul Garg4509de12019-02-14 07:11:35 +00002418 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
Dave Watsonc46234e2018-03-22 10:10:35 -07002419 if (rc)
2420 goto free_aead;
2421
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002422 if (sw_ctx_rx) {
Vakul Garg692d7b52019-01-16 10:40:16 +00002423 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
Vakul Garg8497ded2019-02-09 07:53:28 +00002424
2425 if (crypto_info->version == TLS_1_3_VERSION)
Jakub Kicinski5c5458e2019-10-06 21:09:31 -07002426 sw_ctx_rx->async_capable = 0;
Vakul Garg8497ded2019-02-09 07:53:28 +00002427 else
2428 sw_ctx_rx->async_capable =
Jakub Kicinski5c5458e2019-10-06 21:09:31 -07002429 !!(tfm->__crt_alg->cra_flags &
2430 CRYPTO_ALG_ASYNC);
Vakul Garg692d7b52019-01-16 10:40:16 +00002431
Dave Watsonc46234e2018-03-22 10:10:35 -07002432 /* Set up strparser */
2433 memset(&cb, 0, sizeof(cb));
2434 cb.rcv_msg = tls_queue;
2435 cb.parse_msg = tls_read_size;
2436
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002437 strp_init(&sw_ctx_rx->strp, sk, &cb);
Dave Watsonc46234e2018-03-22 10:10:35 -07002438 }
2439
2440 goto out;
Dave Watson3c4d7552017-06-14 11:37:39 -07002441
2442free_aead:
Dave Watsonc46234e2018-03-22 10:10:35 -07002443 crypto_free_aead(*aead);
2444 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002445free_rec_seq:
Dave Watsonc46234e2018-03-22 10:10:35 -07002446 kfree(cctx->rec_seq);
2447 cctx->rec_seq = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002448free_iv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002449 kfree(cctx->iv);
2450 cctx->iv = NULL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002451free_priv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002452 if (tx) {
2453 kfree(ctx->priv_ctx_tx);
2454 ctx->priv_ctx_tx = NULL;
2455 } else {
2456 kfree(ctx->priv_ctx_rx);
2457 ctx->priv_ctx_rx = NULL;
2458 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002459out:
2460 return rc;
2461}
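
/* The userspace path that reaches tls_set_sw_offload(): a minimal sketch for
 * AES-GCM-128 / TLS 1.2, assuming the handshake was completed in userspace
 * and the traffic key, IV, salt and initial record sequence were extracted
 * from it (the function and parameter names here are illustrative).
 */
#include <linux/tls.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SOL_TLS
#define SOL_TLS 282	/* from include/linux/socket.h */
#endif
#ifndef TCP_ULP
#define TCP_ULP 31	/* from include/uapi/linux/tcp.h */
#endif

static int enable_ktls_rx(int fd, const unsigned char *key,
			  const unsigned char *iv, const unsigned char *salt,
			  const unsigned char *rec_seq)
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	/* Attach the TLS upper-layer protocol, then install the RX keys;
	 * the latter ends up in tls_set_sw_offload(sk, ctx, 0).
	 */
	if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	return setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));
}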