/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE

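/* Count the scatterlist entries needed to describe @len bytes of @skb
 * starting at @offset, walking the linear area, the page frags and any
 * frag list. Recursion into nested frag lists is capped at 24 levels to
 * bound stack usage.
 */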
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}

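/* TLS 1.3 hides the real content type behind zero padding at the end of
 * the plaintext. Scan backwards, starting one byte before the 16-byte AEAD
 * tag, for the first non-zero byte: that byte is the inner content type
 * and everything after it is padding. Returns the number of padding bytes
 * to strip, or -EBADMSG if no content type byte is found.
 */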
static int padding_length(struct tls_sw_context_rx *ctx,
			  struct tls_context *tls_ctx, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (tls_ctx->prot_info.version == TLS_1_3_VERSION) {
		char content_type = 0;
		int err;
		int back = 17;

		while (content_type == 0) {
			if (back > rxm->full_len)
				return -EBADMSG;
			err = skb_copy_bits(skb,
					    rxm->offset + rxm->full_len - back,
					    &content_type, 1);
			if (content_type)
				break;
			sub++;
			back++;
		}
		ctx->control = content_type;
	}
	return sub;
}

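/* Completion callback for (possibly asynchronous) decrypt requests. On
 * success it strips the TLS 1.3 padding and the record header/tag from the
 * strparser message; it then releases any zero-copy destination pages and
 * wakes up a waiter once the last pending decryption has finished.
 */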
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct scatterlist *sgout = aead_req->dst;
	struct scatterlist *sgin = aead_req->src;
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	prot = &tls_ctx->prot_info;

	/* Propagate the error if there was one */
	if (err) {
		ctx->async_wait.err = err;
		tls_err_abort(skb->sk, err);
	} else {
		struct strp_msg *rxm = strp_msg(skb);

		rxm->full_len -= padding_length(ctx, tls_ctx, skb);
		rxm->offset += prot->prepend_size;
		rxm->full_len -= prot->overhead_size;
	}

	/* After using skb->sk to propagate sk through crypto async callback
	 * we need to NULL it again.
	 */
	skb->sk = NULL;

	/* Free the destination pages if skb was not decrypted in place */
	if (sgout != sgin) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	pending = atomic_dec_return(&ctx->decrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);
}

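/* Submit one AEAD decrypt request for a record. In async mode the request
 * completes in tls_decrypt_done() and -EINPROGRESS is returned to the
 * caller; otherwise we wait for the crypto layer and return its result.
 */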
static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (async) {
		/* Using skb->sk to push sk through to crypto async callback
		 * handler. This allows propagating errors up to the socket
		 * if needed. It _must_ be cleared in the async handler
		 * before kfree_skb is called. We _know_ skb->sk is NULL
		 * because it is a clone from strparser.
		 */
		skb->sk = sk;
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, skb);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (async)
			return ret;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}

	if (async)
		atomic_dec(&ctx->decrypt_pending);

	return ret;
}

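/* Trim both halves of the open record down to @target_size bytes of
 * payload; the encrypted side keeps extra room for the record overhead.
 */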
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has the required room (ensured by caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * the same offset for both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

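/* Allocate a fresh TLS record together with its AEAD request and set up
 * the two-entry AAD + payload scatterlists for both the plaintext and the
 * encrypted side.
 */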
static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}

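/* Transmit the partially sent record at the head of tx_list, if any, and
 * then every record that has already finished encryption (tx_ready). Any
 * transmit error other than -EAGAIN aborts the connection.
 */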
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list.
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	return rc;
}

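/* Completion callback for (possibly asynchronous) encrypt requests. It
 * re-extends the current sg entry to cover the record header that
 * tls_do_encryption() skipped, marks the record ready for transmission
 * and, if the record sits at the head of tx_list, schedules the transmit
 * work.
 */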
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct sock *sk = req->data;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);
	msg_en = &rec->msg_encrypted;

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if an error was previously set on the socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If the completed record is at the head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}

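/* Build the per-record nonce (static IV XORed with the record sequence
 * number), hide the record-header bytes from the AEAD output sg, queue the
 * record on tx_list and submit the encrypt request. Returns -EINPROGRESS
 * when the crypto layer completes asynchronously; on success the record
 * sequence number is advanced.
 */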
static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc;

	memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data));
	xor_iv_with_seq(prot->version, rec->iv_data,
			tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, sk);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, &tls_ctx->tx, prot->version);
	return rc;
}

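/* Split the open record at @split_point so that the part covered by
 * apply_bytes can be pushed on its own. The remainder is moved to a freshly
 * allocated record returned in @to; tls_merge_open_record() undoes the
 * split if the push fails.
 */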
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}

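/* Reverse a split: fold the plaintext sg entries of @from back into @to,
 * coalescing the boundary entry when the pages are contiguous, and take
 * over @from's encrypted buffer.
 */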
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}

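/* Finalize the open record and hand it to the crypto layer: optionally
 * split it at apply_bytes, append the TLS 1.3 inner content type, chain the
 * AAD/payload scatterlists, fill in the AAD and record header, then kick
 * off encryption followed by any ready transmissions.
 */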
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, uninitialized_var(orig_end);
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message.  No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
		 &msg_en->sg.data[i] : &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, prot->rec_seq_size,
		     record_type, prot->version);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type, prot->version);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}

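/* Run the open record through the BPF sockmap verdict (when a psock with a
 * policy is attached) and act on the result: push the record to the TLS
 * layer, redirect the plaintext to another socket, or drop it. Loops while
 * a record remains open and the verdict keeps making progress.
 */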
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       size_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy)
		return tls_push_record(sk, flags, record_type);
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		if (delta < msg->sg.size)
			delta -= msg->sg.size;
		else
			delta = 0;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err < 0) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
 out_err:
	sk_psock_put(sk, psock);
	return err;
}

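/* Push whatever currently sits in the open record, routing it through the
 * BPF verdict first. Returns 0 when there is nothing to push.
 */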
static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}

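/* sendmsg() for a TLS_SW transmit socket. Copies (or, when possible, maps
 * zero-copy) user data into TLS records, pushing a record whenever it is
 * full or the message ends, and finally waits for and transmits any records
 * whose encryption completed asynchronously.
 */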
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	/* Wait for any pending writes on the socket to complete */
	if (unlikely(sk->sk_write_pending)) {
		ret = wait_on_pending_writer(sk, &timeo);
		if (unlikely(ret))
			goto send_end;
	}

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			rec->inplace_crypto = 0;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		smp_store_mb(ctx->async_notify, true);

		if (atomic_read(&ctx->encrypt_pending))
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}

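/* Common worker for sendpage(): appends page fragments to the open record
 * and pushes the record through the BPF verdict whenever it fills up, the
 * caller signalled the end of data, or the sg table is full.
 */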
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
			      int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct sk_msg *msg_pl;
	struct tls_rec *rec;
	int num_async = 0;
	size_t copied = 0;
	bool full_record;
	int record_room;
	int ret = 0;
	bool eor;

	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Wait for any pending writes on the socket to complete */
	if (unlikely(sk->sk_write_pending)) {
		ret = wait_on_pending_writer(sk, &timeo);
		if (unlikely(ret))
			goto sendpage_end;
	}

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		msg_pl = &rec->msg_plaintext;

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copied = 0;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + copy + prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}

		sk_msg_page_add(msg_pl, page, copy, offset);
		sk_mem_charge(sk, copy);

		offset += copy;
		size -= copy;
		copied += copy;

		tls_ctx->pending_open_record_frags = true;
		if (full_record || eor || sk_msg_full(msg_pl)) {
			rec->inplace_crypto = 0;
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied, flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto sendpage_end;
				}
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}

		goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	ret = sk_stream_error(sk, flags, ret);
	return copied ? copied : ret;
}

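/* sendpage() entry point: validate the flags and run the worker under the
 * socket lock.
 */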
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	int ret;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
		return -ENOTSUPP;

	lock_sock(sk);
	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
	release_sock(sk);
	return ret;
}

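/* Block until the strparser has delivered a complete record (ctx->recv_pkt)
 * or the attached psock has queued data. Returns NULL on socket error,
 * shutdown, EOF, timeout or signal, with *err set where applicable.
 */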
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
				     int flags, long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo,
			      ctx->recv_pkt != skb ||
			      !sk_psock_queue_empty(psock),
			      &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

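/* Pin the user pages backing @from and describe up to @length bytes of them
 * in the scatterlist @to, so decryption can write straight into the user
 * buffer. On failure the iterator is rewound so the caller can fall back to
 * copying.
 */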
static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
			       int length, int *pages_used,
			       unsigned int *size_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = *size_used;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

/* This function decrypts the input skb into either out_iov, out_sg or the
 * skb's own buffers. The input parameter 'zc' indicates whether zero-copy
 * mode should be tried. With zero-copy mode, either out_iov or out_sg must
 * be non-NULL. If both out_iov and out_sg are NULL, the decryption happens
 * inside the skb buffers themselves, i.e. zero-copy gets disabled and 'zc'
 * is updated.
 */
1330static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1331 struct iov_iter *out_iov,
1332 struct scatterlist *out_sg,
Vakul Garg692d7b52019-01-16 10:40:16 +00001333 int *chunk, bool *zc, bool async)
Vakul Garg0b243d02018-08-10 20:46:41 +05301334{
1335 struct tls_context *tls_ctx = tls_get_ctx(sk);
1336 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001337 struct tls_prot_info *prot = &tls_ctx->prot_info;
Vakul Garg0b243d02018-08-10 20:46:41 +05301338 struct strp_msg *rxm = strp_msg(skb);
1339 int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
1340 struct aead_request *aead_req;
1341 struct sk_buff *unused;
1342 u8 *aad, *iv, *mem = NULL;
1343 struct scatterlist *sgin = NULL;
1344 struct scatterlist *sgout = NULL;
Vakul Garg4509de12019-02-14 07:11:35 +00001345 const int data_len = rxm->full_len - prot->overhead_size +
1346 prot->tail_size;
Vakul Garg0b243d02018-08-10 20:46:41 +05301347
1348 if (*zc && (out_iov || out_sg)) {
1349 if (out_iov)
1350 n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
1351 else
1352 n_sgout = sg_nents(out_sg);
Vakul Garg4509de12019-02-14 07:11:35 +00001353 n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1354 rxm->full_len - prot->prepend_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301355 } else {
1356 n_sgout = 0;
1357 *zc = false;
Doron Roberts-Kedes0927f712018-08-28 16:33:57 -07001358 n_sgin = skb_cow_data(skb, 0, &unused);
Vakul Garg0b243d02018-08-10 20:46:41 +05301359 }
1360
Vakul Garg0b243d02018-08-10 20:46:41 +05301361 if (n_sgin < 1)
1362 return -EBADMSG;
1363
1364 /* Increment to accommodate AAD */
1365 n_sgin = n_sgin + 1;
1366
1367 nsg = n_sgin + n_sgout;
1368
1369 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1370 mem_size = aead_size + (nsg * sizeof(struct scatterlist));
Vakul Garg4509de12019-02-14 07:11:35 +00001371 mem_size = mem_size + prot->aad_size;
Vakul Garg0b243d02018-08-10 20:46:41 +05301372 mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
1373
1374 /* Allocate a single block of memory which contains
1375 * aead_req || sgin[] || sgout[] || aad || iv.
1376 * This order achieves correct alignment for aead_req, sgin, sgout.
1377 */
1378 mem = kmalloc(mem_size, sk->sk_allocation);
1379 if (!mem)
1380 return -ENOMEM;
1381
1382 /* Segment the allocated memory */
1383 aead_req = (struct aead_request *)mem;
1384 sgin = (struct scatterlist *)(mem + aead_size);
1385 sgout = sgin + n_sgin;
1386 aad = (u8 *)(sgout + n_sgout);
Vakul Garg4509de12019-02-14 07:11:35 +00001387 iv = aad + prot->aad_size;
Vakul Garg0b243d02018-08-10 20:46:41 +05301388
1389 /* Prepare IV */
1390 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1391 iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
Vakul Garg4509de12019-02-14 07:11:35 +00001392 prot->iv_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301393 if (err < 0) {
1394 kfree(mem);
1395 return err;
1396 }
Vakul Garg4509de12019-02-14 07:11:35 +00001397 if (prot->version == TLS_1_3_VERSION)
Dave Watson130b3922019-01-30 21:58:31 +00001398 memcpy(iv, tls_ctx->rx.iv, crypto_aead_ivsize(ctx->aead_recv));
1399 else
1400 memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
1401
Vakul Garg4509de12019-02-14 07:11:35 +00001402 xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);
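	/* Nonce recap: for TLS 1.2 (RFC 5288) the AEAD nonce is the 4-byte
	 * salt followed by the record's 8-byte explicit IV copied above;
	 * for TLS 1.3 (RFC 8446) it is the full static IV with the record
	 * sequence number XORed into its last 8 bytes, which is what
	 * xor_iv_with_seq() has just done.
	 */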
Vakul Garg0b243d02018-08-10 20:46:41 +05301403
1404 /* Prepare AAD */
Vakul Garg4509de12019-02-14 07:11:35 +00001405 tls_make_aad(aad, rxm->full_len - prot->overhead_size +
1406 prot->tail_size,
1407 tls_ctx->rx.rec_seq, prot->rec_seq_size,
1408 ctx->control, prot->version);
Vakul Garg0b243d02018-08-10 20:46:41 +05301409
1410 /* Prepare sgin */
1411 sg_init_table(sgin, n_sgin);
Vakul Garg4509de12019-02-14 07:11:35 +00001412 sg_set_buf(&sgin[0], aad, prot->aad_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301413 err = skb_to_sgvec(skb, &sgin[1],
Vakul Garg4509de12019-02-14 07:11:35 +00001414 rxm->offset + prot->prepend_size,
1415 rxm->full_len - prot->prepend_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301416 if (err < 0) {
1417 kfree(mem);
1418 return err;
1419 }
1420
1421 if (n_sgout) {
1422 if (out_iov) {
1423 sg_init_table(sgout, n_sgout);
Vakul Garg4509de12019-02-14 07:11:35 +00001424 sg_set_buf(&sgout[0], aad, prot->aad_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301425
1426 *chunk = 0;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001427 err = tls_setup_from_iter(sk, out_iov, data_len,
1428 &pages, chunk, &sgout[1],
1429 (n_sgout - 1));
Vakul Garg0b243d02018-08-10 20:46:41 +05301430 if (err < 0)
1431 goto fallback_to_reg_recv;
1432 } else if (out_sg) {
1433 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1434 } else {
1435 goto fallback_to_reg_recv;
1436 }
1437 } else {
1438fallback_to_reg_recv:
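		/* Decrypt in place: the output scatterlist aliases the input,
		 * so the plaintext overwrites the ciphertext in the skb's own
		 * buffers.
		 */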
1439 sgout = sgin;
1440 pages = 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001441 *chunk = data_len;
Vakul Garg0b243d02018-08-10 20:46:41 +05301442 *zc = false;
1443 }
1444
1445 /* Prepare and submit AEAD request */
Vakul Garg94524d82018-08-29 15:26:55 +05301446 err = tls_do_decryption(sk, skb, sgin, sgout, iv,
Vakul Garg692d7b52019-01-16 10:40:16 +00001447 data_len, aead_req, async);
Vakul Garg94524d82018-08-29 15:26:55 +05301448 if (err == -EINPROGRESS)
1449 return err;
Vakul Garg0b243d02018-08-10 20:46:41 +05301450
1451 /* Release the pages in case iov was mapped to pages */
1452 for (; pages > 0; pages--)
1453 put_page(sg_page(&sgout[pages]));
1454
1455 kfree(mem);
1456 return err;
1457}
1458
Boris Pismennydafb67f2018-07-13 14:33:40 +03001459static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
Vakul Garg692d7b52019-01-16 10:40:16 +00001460 struct iov_iter *dest, int *chunk, bool *zc,
1461 bool async)
Boris Pismennydafb67f2018-07-13 14:33:40 +03001462{
1463 struct tls_context *tls_ctx = tls_get_ctx(sk);
1464 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001465 struct tls_prot_info *prot = &tls_ctx->prot_info;
1466 int version = prot->version;
Boris Pismennydafb67f2018-07-13 14:33:40 +03001467 struct strp_msg *rxm = strp_msg(skb);
1468 int err = 0;
1469
Boris Pismenny4799ac82018-07-13 14:33:43 +03001470#ifdef CONFIG_TLS_DEVICE
1471 err = tls_device_decrypted(sk, skb);
Boris Pismennydafb67f2018-07-13 14:33:40 +03001472 if (err < 0)
1473 return err;
Boris Pismenny4799ac82018-07-13 14:33:43 +03001474#endif
1475 if (!ctx->decrypted) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001476 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc, async);
Vakul Garg94524d82018-08-29 15:26:55 +05301477 if (err < 0) {
1478 if (err == -EINPROGRESS)
Dave Watson130b3922019-01-30 21:58:31 +00001479 tls_advance_record_sn(sk, &tls_ctx->rx,
1480 version);
Vakul Garg94524d82018-08-29 15:26:55 +05301481
Boris Pismenny4799ac82018-07-13 14:33:43 +03001482 return err;
Vakul Garg94524d82018-08-29 15:26:55 +05301483 }
Dave Watson130b3922019-01-30 21:58:31 +00001484
1485 rxm->full_len -= padding_length(ctx, tls_ctx, skb);
1486
Vakul Garg4509de12019-02-14 07:11:35 +00001487 rxm->offset += prot->prepend_size;
1488 rxm->full_len -= prot->overhead_size;
Dave Watson130b3922019-01-30 21:58:31 +00001489 tls_advance_record_sn(sk, &tls_ctx->rx, version);
Dave Watsonfedf2012019-01-30 21:58:24 +00001490 ctx->decrypted = true;
1491 ctx->saved_data_ready(sk);
Boris Pismenny4799ac82018-07-13 14:33:43 +03001492 } else {
1493 *zc = false;
1494 }
Boris Pismennydafb67f2018-07-13 14:33:40 +03001495
Boris Pismennydafb67f2018-07-13 14:33:40 +03001496 return err;
1497}
1498
1499int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1500 struct scatterlist *sgout)
Dave Watsonc46234e2018-03-22 10:10:35 -07001501{
Vakul Garg0b243d02018-08-10 20:46:41 +05301502 bool zc = true;
1503 int chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001504
Vakul Garg692d7b52019-01-16 10:40:16 +00001505 return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
Dave Watsonc46234e2018-03-22 10:10:35 -07001506}
1507
1508static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
1509 unsigned int len)
1510{
1511 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001512 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001513
Vakul Garg94524d82018-08-29 15:26:55 +05301514 if (skb) {
1515 struct strp_msg *rxm = strp_msg(skb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001516
Vakul Garg94524d82018-08-29 15:26:55 +05301517 if (len < rxm->full_len) {
1518 rxm->offset += len;
1519 rxm->full_len -= len;
1520 return false;
1521 }
1522 kfree_skb(skb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001523 }
1524
1525 /* Finished with message */
1526 ctx->recv_pkt = NULL;
Doron Roberts-Kedes7170e602018-06-06 09:33:28 -07001527 __strp_unpause(&ctx->strp);
Dave Watsonc46234e2018-03-22 10:10:35 -07001528
1529 return true;
1530}
1531
Vakul Garg692d7b52019-01-16 10:40:16 +00001532/* This function traverses the rx_list in the tls receive context to copy
Vakul Garg2b794c42019-02-23 08:42:37 +00001533 * the decrypted records into the buffer provided by the caller when zero
Vakul Garg692d7b52019-01-16 10:40:16 +00001534 * copy is not true. Further, records are removed from the rx_list if it is
 1535 * not a peek case and the record has been consumed completely.
 1536 */
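/* Call-pattern sketch, mirroring the first call site in tls_sw_recvmsg()
 * below (drain already-decrypted records before touching the socket):
 *
 *	err = process_rx_list(ctx, msg, &control, &cmsg, 0, len,
 *			      false, is_peek);
 *
 * A second call at recv_end drains the records queued while asynchronous
 * decryptions were in flight.
 */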
1537static int process_rx_list(struct tls_sw_context_rx *ctx,
1538 struct msghdr *msg,
Vakul Garg2b794c42019-02-23 08:42:37 +00001539 u8 *control,
1540 bool *cmsg,
Vakul Garg692d7b52019-01-16 10:40:16 +00001541 size_t skip,
1542 size_t len,
1543 bool zc,
1544 bool is_peek)
1545{
1546 struct sk_buff *skb = skb_peek(&ctx->rx_list);
Vakul Garg2b794c42019-02-23 08:42:37 +00001547 u8 ctrl = *control;
1548 u8 msgc = *cmsg;
1549 struct tls_msg *tlm;
Vakul Garg692d7b52019-01-16 10:40:16 +00001550 ssize_t copied = 0;
1551
Vakul Garg2b794c42019-02-23 08:42:37 +00001552 /* Set the record type in 'control' if caller didn't pass it */
1553 if (!ctrl && skb) {
1554 tlm = tls_msg(skb);
1555 ctrl = tlm->control;
1556 }
1557
Vakul Garg692d7b52019-01-16 10:40:16 +00001558 while (skip && skb) {
1559 struct strp_msg *rxm = strp_msg(skb);
Vakul Garg2b794c42019-02-23 08:42:37 +00001560 tlm = tls_msg(skb);
1561
1562 /* Cannot process a record of different type */
1563 if (ctrl != tlm->control)
1564 return 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001565
1566 if (skip < rxm->full_len)
1567 break;
1568
1569 skip = skip - rxm->full_len;
1570 skb = skb_peek_next(skb, &ctx->rx_list);
1571 }
1572
1573 while (len && skb) {
1574 struct sk_buff *next_skb;
1575 struct strp_msg *rxm = strp_msg(skb);
1576 int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1577
Vakul Garg2b794c42019-02-23 08:42:37 +00001578 tlm = tls_msg(skb);
1579
1580 /* Cannot process a record of different type */
1581 if (ctrl != tlm->control)
1582 return 0;
1583
1584 /* Set record type if not already done. For a non-data record,
1585 * do not proceed if record type could not be copied.
1586 */
1587 if (!msgc) {
1588 int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1589 sizeof(ctrl), &ctrl);
1590 msgc = true;
1591 if (ctrl != TLS_RECORD_TYPE_DATA) {
1592 if (cerr || msg->msg_flags & MSG_CTRUNC)
1593 return -EIO;
1594
1595 *cmsg = msgc;
1596 }
1597 }
1598
Vakul Garg692d7b52019-01-16 10:40:16 +00001599 if (!zc || (rxm->full_len - skip) > len) {
1600 int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1601 msg, chunk);
1602 if (err < 0)
1603 return err;
1604 }
1605
1606 len = len - chunk;
1607 copied = copied + chunk;
1608
 1609 /* Consume the data from the record in the non-peek case */
1610 if (!is_peek) {
1611 rxm->offset = rxm->offset + chunk;
1612 rxm->full_len = rxm->full_len - chunk;
1613
1614 /* Return if there is unconsumed data in the record */
1615 if (rxm->full_len - skip)
1616 break;
1617 }
1618
 1619 /* The remaining skip-bytes must lie in the 1st record in rx_list.
1620 * So from the 2nd record, 'skip' should be 0.
1621 */
1622 skip = 0;
1623
1624 if (msg)
1625 msg->msg_flags |= MSG_EOR;
1626
1627 next_skb = skb_peek_next(skb, &ctx->rx_list);
1628
1629 if (!is_peek) {
1630 skb_unlink(skb, &ctx->rx_list);
1631 kfree_skb(skb);
1632 }
1633
1634 skb = next_skb;
1635 }
1636
Vakul Garg2b794c42019-02-23 08:42:37 +00001637 *control = ctrl;
Vakul Garg692d7b52019-01-16 10:40:16 +00001638 return copied;
1639}
1640
Dave Watsonc46234e2018-03-22 10:10:35 -07001641int tls_sw_recvmsg(struct sock *sk,
1642 struct msghdr *msg,
1643 size_t len,
1644 int nonblock,
1645 int flags,
1646 int *addr_len)
1647{
1648 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001649 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001650 struct tls_prot_info *prot = &tls_ctx->prot_info;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001651 struct sk_psock *psock;
Vakul Garg692d7b52019-01-16 10:40:16 +00001652 unsigned char control = 0;
1653 ssize_t decrypted = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001654 struct strp_msg *rxm;
Vakul Garg2b794c42019-02-23 08:42:37 +00001655 struct tls_msg *tlm;
Dave Watsonc46234e2018-03-22 10:10:35 -07001656 struct sk_buff *skb;
1657 ssize_t copied = 0;
1658 bool cmsg = false;
Daniel Borkmann06030db2018-06-15 03:07:46 +02001659 int target, err = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001660 long timeo;
David Howells00e23702018-10-22 13:07:28 +01001661 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
Vakul Garg692d7b52019-01-16 10:40:16 +00001662 bool is_peek = flags & MSG_PEEK;
Vakul Garg94524d82018-08-29 15:26:55 +05301663 int num_async = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001664
1665 flags |= nonblock;
1666
1667 if (unlikely(flags & MSG_ERRQUEUE))
1668 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1669
John Fastabendd3b18ad32018-10-13 02:46:01 +02001670 psock = sk_psock_get(sk);
Dave Watsonc46234e2018-03-22 10:10:35 -07001671 lock_sock(sk);
1672
Vakul Garg692d7b52019-01-16 10:40:16 +00001673 /* Process pending decrypted records. These must be copied (non-zero-copy) */
Vakul Garg2b794c42019-02-23 08:42:37 +00001674 err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
1675 is_peek);
Vakul Garg692d7b52019-01-16 10:40:16 +00001676 if (err < 0) {
1677 tls_err_abort(sk, err);
1678 goto end;
1679 } else {
1680 copied = err;
1681 }
1682
1683 len = len - copied;
1684 if (len) {
1685 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1686 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1687 } else {
1688 goto recv_end;
1689 }
1690
Dave Watsonc46234e2018-03-22 10:10:35 -07001691 do {
Vakul Garg692d7b52019-01-16 10:40:16 +00001692 bool retain_skb = false;
Vakul Garg692d7b52019-01-16 10:40:16 +00001693 bool zc = false;
1694 int to_decrypt;
Dave Watsonc46234e2018-03-22 10:10:35 -07001695 int chunk = 0;
Vakul Gargc0ab4732019-02-11 11:31:05 +00001696 bool async;
Dave Watsonc46234e2018-03-22 10:10:35 -07001697
John Fastabendd3b18ad32018-10-13 02:46:01 +02001698 skb = tls_wait_data(sk, psock, flags, timeo, &err);
1699 if (!skb) {
1700 if (psock) {
John Fastabend02c558b2018-10-16 11:08:04 -07001701 int ret = __tcp_bpf_recvmsg(sk, psock,
1702 msg, len, flags);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001703
1704 if (ret > 0) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001705 decrypted += ret;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001706 len -= ret;
1707 continue;
1708 }
1709 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001710 goto recv_end;
Vakul Garg2b794c42019-02-23 08:42:37 +00001711 } else {
1712 tlm = tls_msg(skb);
1713 if (prot->version == TLS_1_3_VERSION)
1714 tlm->control = 0;
1715 else
1716 tlm->control = ctx->control;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001717 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001718
1719 rxm = strp_msg(skb);
Vakul Garg94524d82018-08-29 15:26:55 +05301720
Vakul Garg4509de12019-02-14 07:11:35 +00001721 to_decrypt = rxm->full_len - prot->overhead_size;
Dave Watsonfedf2012019-01-30 21:58:24 +00001722
1723 if (to_decrypt <= len && !is_kvec && !is_peek &&
Dave Watson130b3922019-01-30 21:58:31 +00001724 ctx->control == TLS_RECORD_TYPE_DATA &&
Vakul Garg4509de12019-02-14 07:11:35 +00001725 prot->version != TLS_1_3_VERSION)
Dave Watsonfedf2012019-01-30 21:58:24 +00001726 zc = true;
1727
Vakul Gargc0ab4732019-02-11 11:31:05 +00001728 /* Do not use async mode if record is non-data */
1729 if (ctx->control == TLS_RECORD_TYPE_DATA)
1730 async = ctx->async_capable;
1731 else
1732 async = false;
1733
Dave Watsonfedf2012019-01-30 21:58:24 +00001734 err = decrypt_skb_update(sk, skb, &msg->msg_iter,
Vakul Gargc0ab4732019-02-11 11:31:05 +00001735 &chunk, &zc, async);
Dave Watsonfedf2012019-01-30 21:58:24 +00001736 if (err < 0 && err != -EINPROGRESS) {
1737 tls_err_abort(sk, EBADMSG);
1738 goto recv_end;
1739 }
1740
Vakul Gargc0ab4732019-02-11 11:31:05 +00001741 if (err == -EINPROGRESS)
Dave Watsonfedf2012019-01-30 21:58:24 +00001742 num_async++;
Vakul Garg2b794c42019-02-23 08:42:37 +00001743 else if (prot->version == TLS_1_3_VERSION)
1744 tlm->control = ctx->control;
1745
 1746 /* If the type of records being processed is not known yet,
 1747 * set it to the record type just dequeued. If it is already known
 1748 * but does not match the record type just dequeued, go to end.
 1749 * We always have the record type here: for tls1.2 it is known as
 1750 * soon as the record is dequeued from the stream parser, and for
 1751 * tls1.3 we disable async.
 1752 */
1753
1754 if (!control)
1755 control = tlm->control;
1756 else if (control != tlm->control)
1757 goto recv_end;
Dave Watsonfedf2012019-01-30 21:58:24 +00001758
Dave Watsonc46234e2018-03-22 10:10:35 -07001759 if (!cmsg) {
1760 int cerr;
1761
1762 cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
Vakul Garg2b794c42019-02-23 08:42:37 +00001763 sizeof(control), &control);
Dave Watsonc46234e2018-03-22 10:10:35 -07001764 cmsg = true;
Vakul Garg2b794c42019-02-23 08:42:37 +00001765 if (control != TLS_RECORD_TYPE_DATA) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001766 if (cerr || msg->msg_flags & MSG_CTRUNC) {
1767 err = -EIO;
1768 goto recv_end;
1769 }
1770 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001771 }
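		/* Userspace-side sketch (not from this file) of picking up
		 * the record type delivered through the cmsg above:
		 *
		 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
		 *	struct msghdr m = { .msg_control = cbuf,
		 *			    .msg_controllen = sizeof(cbuf) };
		 *	struct cmsghdr *c;
		 *
		 *	recvmsg(fd, &m, 0);
		 *	c = CMSG_FIRSTHDR(&m);
		 *	if (c && c->cmsg_level == SOL_TLS &&
		 *	    c->cmsg_type == TLS_GET_RECORD_TYPE)
		 *		rtype = *CMSG_DATA(c);
		 */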
1772
Vakul Gargc0ab4732019-02-11 11:31:05 +00001773 if (async)
1774 goto pick_next_record;
1775
Dave Watsonfedf2012019-01-30 21:58:24 +00001776 if (!zc) {
1777 if (rxm->full_len > len) {
1778 retain_skb = true;
1779 chunk = len;
1780 } else {
1781 chunk = rxm->full_len;
1782 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001783
Dave Watsonfedf2012019-01-30 21:58:24 +00001784 err = skb_copy_datagram_msg(skb, rxm->offset,
1785 msg, chunk);
1786 if (err < 0)
1787 goto recv_end;
Dave Watsonc46234e2018-03-22 10:10:35 -07001788
Dave Watsonfedf2012019-01-30 21:58:24 +00001789 if (!is_peek) {
1790 rxm->offset = rxm->offset + chunk;
1791 rxm->full_len = rxm->full_len - chunk;
Vakul Garg692d7b52019-01-16 10:40:16 +00001792 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001793 }
1794
Vakul Garg94524d82018-08-29 15:26:55 +05301795pick_next_record:
Vakul Garg692d7b52019-01-16 10:40:16 +00001796 if (chunk > len)
1797 chunk = len;
1798
1799 decrypted += chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001800 len -= chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001801
Vakul Garg692d7b52019-01-16 10:40:16 +00001802 /* For async or peek case, queue the current skb */
1803 if (async || is_peek || retain_skb) {
1804 skb_queue_tail(&ctx->rx_list, skb);
1805 skb = NULL;
1806 }
Vakul Garg94524d82018-08-29 15:26:55 +05301807
Vakul Garg692d7b52019-01-16 10:40:16 +00001808 if (tls_sw_advance_skb(sk, skb, chunk)) {
1809 /* Return full control message to
1810 * userspace before trying to parse
1811 * another message type
Daniel Borkmann50c6b582018-09-14 23:00:55 +02001812 */
Vakul Garg692d7b52019-01-16 10:40:16 +00001813 msg->msg_flags |= MSG_EOR;
1814 if (ctx->control != TLS_RECORD_TYPE_DATA)
1815 goto recv_end;
1816 } else {
Daniel Borkmann50c6b582018-09-14 23:00:55 +02001817 break;
Dave Watsonc46234e2018-03-22 10:10:35 -07001818 }
Vakul Garg94524d82018-08-29 15:26:55 +05301819
Daniel Borkmann06030db2018-06-15 03:07:46 +02001820 /* If we have a new message from strparser, continue now. */
Vakul Garg692d7b52019-01-16 10:40:16 +00001821 if (decrypted >= target && !ctx->recv_pkt)
Daniel Borkmann06030db2018-06-15 03:07:46 +02001822 break;
Dave Watsonc46234e2018-03-22 10:10:35 -07001823 } while (len);
1824
1825recv_end:
Vakul Garg94524d82018-08-29 15:26:55 +05301826 if (num_async) {
1827 /* Wait for all previously submitted records to be decrypted */
1828 smp_store_mb(ctx->async_notify, true);
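		/* smp_store_mb() orders the async_notify store before the
		 * decrypt_pending read below; the async completion callback
		 * performs the mirror-image sequence (decrement
		 * decrypt_pending, then read async_notify), so a record
		 * finishing concurrently is either counted here or completes
		 * async_wait itself.
		 */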
1829 if (atomic_read(&ctx->decrypt_pending)) {
1830 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1831 if (err) {
 1832 /* one of the async decrypts failed */
1833 tls_err_abort(sk, err);
1834 copied = 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001835 decrypted = 0;
1836 goto end;
Vakul Garg94524d82018-08-29 15:26:55 +05301837 }
1838 } else {
1839 reinit_completion(&ctx->async_wait.completion);
1840 }
1841 WRITE_ONCE(ctx->async_notify, false);
Vakul Garg692d7b52019-01-16 10:40:16 +00001842
1843 /* Drain records from the rx_list & copy if required */
1844 if (is_peek || is_kvec)
Vakul Garg2b794c42019-02-23 08:42:37 +00001845 err = process_rx_list(ctx, msg, &control, &cmsg, copied,
Vakul Garg692d7b52019-01-16 10:40:16 +00001846 decrypted, false, is_peek);
1847 else
Vakul Garg2b794c42019-02-23 08:42:37 +00001848 err = process_rx_list(ctx, msg, &control, &cmsg, 0,
Vakul Garg692d7b52019-01-16 10:40:16 +00001849 decrypted, true, is_peek);
1850 if (err < 0) {
1851 tls_err_abort(sk, err);
1852 copied = 0;
1853 goto end;
1854 }
Vakul Garg94524d82018-08-29 15:26:55 +05301855 }
1856
Vakul Garg692d7b52019-01-16 10:40:16 +00001857 copied += decrypted;
1858
1859end:
Dave Watsonc46234e2018-03-22 10:10:35 -07001860 release_sock(sk);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001861 if (psock)
1862 sk_psock_put(sk, psock);
Dave Watsonc46234e2018-03-22 10:10:35 -07001863 return copied ? : err;
1864}
1865
1866ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
1867 struct pipe_inode_info *pipe,
1868 size_t len, unsigned int flags)
1869{
1870 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001871 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001872 struct strp_msg *rxm = NULL;
1873 struct sock *sk = sock->sk;
1874 struct sk_buff *skb;
1875 ssize_t copied = 0;
1876 int err = 0;
1877 long timeo;
1878 int chunk;
Vakul Garg0b243d02018-08-10 20:46:41 +05301879 bool zc = false;
Dave Watsonc46234e2018-03-22 10:10:35 -07001880
1881 lock_sock(sk);
1882
1883 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1884
John Fastabendd3b18ad32018-10-13 02:46:01 +02001885 skb = tls_wait_data(sk, NULL, flags, timeo, &err);
Dave Watsonc46234e2018-03-22 10:10:35 -07001886 if (!skb)
1887 goto splice_read_end;
1888
Dave Watsonc46234e2018-03-22 10:10:35 -07001889 if (!ctx->decrypted) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001890 err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
Dave Watsonc46234e2018-03-22 10:10:35 -07001891
Dave Watsonfedf2012019-01-30 21:58:24 +00001892 /* splice does not support reading control messages */
1893 if (ctx->control != TLS_RECORD_TYPE_DATA) {
1894 err = -ENOTSUPP;
1895 goto splice_read_end;
1896 }
1897
Dave Watsonc46234e2018-03-22 10:10:35 -07001898 if (err < 0) {
1899 tls_err_abort(sk, EBADMSG);
1900 goto splice_read_end;
1901 }
1902 ctx->decrypted = true;
1903 }
1904 rxm = strp_msg(skb);
1905
1906 chunk = min_t(unsigned int, rxm->full_len, len);
1907 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
1908 if (copied < 0)
1909 goto splice_read_end;
1910
1911 if (likely(!(flags & MSG_PEEK)))
1912 tls_sw_advance_skb(sk, skb, copied);
1913
1914splice_read_end:
1915 release_sock(sk);
1916 return copied ? : err;
1917}
1918
John Fastabend924ad652018-10-13 02:46:00 +02001919bool tls_sw_stream_read(const struct sock *sk)
Dave Watsonc46234e2018-03-22 10:10:35 -07001920{
Dave Watsonc46234e2018-03-22 10:10:35 -07001921 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001922 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001923 bool ingress_empty = true;
1924 struct sk_psock *psock;
Dave Watsonc46234e2018-03-22 10:10:35 -07001925
John Fastabendd3b18ad32018-10-13 02:46:01 +02001926 rcu_read_lock();
1927 psock = sk_psock(sk);
1928 if (psock)
1929 ingress_empty = list_empty(&psock->ingress_msg);
1930 rcu_read_unlock();
Dave Watsonc46234e2018-03-22 10:10:35 -07001931
John Fastabendd3b18ad32018-10-13 02:46:01 +02001932 return !ingress_empty || ctx->recv_pkt;
Dave Watsonc46234e2018-03-22 10:10:35 -07001933}
1934
1935static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
1936{
1937 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001938 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001939 struct tls_prot_info *prot = &tls_ctx->prot_info;
Kees Cook3463e512018-06-25 16:55:05 -07001940 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
Dave Watsonc46234e2018-03-22 10:10:35 -07001941 struct strp_msg *rxm = strp_msg(skb);
1942 size_t cipher_overhead;
1943 size_t data_len = 0;
1944 int ret;
1945
1946 /* Verify that we have a full TLS header, or wait for more data */
Vakul Garg4509de12019-02-14 07:11:35 +00001947 if (rxm->offset + prot->prepend_size > skb->len)
Dave Watsonc46234e2018-03-22 10:10:35 -07001948 return 0;
1949
Kees Cook3463e512018-06-25 16:55:05 -07001950 /* Sanity-check size of on-stack buffer. */
Vakul Garg4509de12019-02-14 07:11:35 +00001951 if (WARN_ON(prot->prepend_size > sizeof(header))) {
Kees Cook3463e512018-06-25 16:55:05 -07001952 ret = -EINVAL;
1953 goto read_failure;
1954 }
1955
Dave Watsonc46234e2018-03-22 10:10:35 -07001956 /* Linearize header to local buffer */
Vakul Garg4509de12019-02-14 07:11:35 +00001957 ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
Dave Watsonc46234e2018-03-22 10:10:35 -07001958
1959 if (ret < 0)
1960 goto read_failure;
1961
1962 ctx->control = header[0];
1963
1964 data_len = ((header[4] & 0xFF) | (header[3] << 8));
1965
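	/* Wire layout of the 5-byte record header parsed above:
	 *
	 *	header[0]	ContentType
	 *	header[1..2]	legacy ProtocolVersion (0x03 0x03 for both
	 *			TLS 1.2 and TLS 1.3)
	 *	header[3..4]	payload length, big endian
	 */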
Vakul Garg4509de12019-02-14 07:11:35 +00001966 cipher_overhead = prot->tag_size;
1967 if (prot->version != TLS_1_3_VERSION)
1968 cipher_overhead += prot->iv_size;
Dave Watsonc46234e2018-03-22 10:10:35 -07001969
Dave Watson130b3922019-01-30 21:58:31 +00001970 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
Vakul Garg4509de12019-02-14 07:11:35 +00001971 prot->tail_size) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001972 ret = -EMSGSIZE;
1973 goto read_failure;
1974 }
1975 if (data_len < cipher_overhead) {
1976 ret = -EBADMSG;
1977 goto read_failure;
1978 }
1979
Dave Watson130b3922019-01-30 21:58:31 +00001980 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
1981 if (header[1] != TLS_1_2_VERSION_MINOR ||
1982 header[2] != TLS_1_2_VERSION_MAJOR) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001983 ret = -EINVAL;
1984 goto read_failure;
1985 }
Boris Pismenny4799ac82018-07-13 14:33:43 +03001986#ifdef CONFIG_TLS_DEVICE
1987 handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
 1988 *(u64 *)tls_ctx->rx.rec_seq);
1989#endif
Dave Watsonc46234e2018-03-22 10:10:35 -07001990 return data_len + TLS_HEADER_SIZE;
1991
1992read_failure:
1993 tls_err_abort(strp->sk, ret);
1994
1995 return ret;
1996}
1997
1998static void tls_queue(struct strparser *strp, struct sk_buff *skb)
1999{
2000 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002001 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07002002
2003 ctx->decrypted = false;
2004
2005 ctx->recv_pkt = skb;
2006 strp_pause(strp);
2007
Vakul Gargad13acc2018-07-30 16:08:33 +05302008 ctx->saved_data_ready(strp->sk);
Dave Watsonc46234e2018-03-22 10:10:35 -07002009}
2010
2011static void tls_data_ready(struct sock *sk)
2012{
2013 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002014 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002015 struct sk_psock *psock;
Dave Watsonc46234e2018-03-22 10:10:35 -07002016
2017 strp_data_ready(&ctx->strp);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002018
2019 psock = sk_psock_get(sk);
2020 if (psock && !list_empty(&psock->ingress_msg)) {
2021 ctx->saved_data_ready(sk);
2022 sk_psock_put(sk, psock);
2023 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002024}
2025
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002026void tls_sw_free_resources_tx(struct sock *sk)
Dave Watson3c4d7552017-06-14 11:37:39 -07002027{
2028 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002029 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +05302030 struct tls_rec *rec, *tmp;
2031
2032 /* Wait for any pending async encryptions to complete */
2033 smp_store_mb(ctx->async_notify, true);
2034 if (atomic_read(&ctx->encrypt_pending))
2035 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2036
Dave Watson10231212019-01-27 00:59:03 +00002037 release_sock(sk);
Vakul Garga42055e2018-09-21 09:46:13 +05302038 cancel_delayed_work_sync(&ctx->tx_work.work);
Dave Watson10231212019-01-27 00:59:03 +00002039 lock_sock(sk);
Vakul Garga42055e2018-09-21 09:46:13 +05302040
2041 /* Tx whatever records we can transmit and abandon the rest */
2042 tls_tx_records(sk, -1);
2043
Vakul Garg9932a292018-09-24 15:35:56 +05302044 /* Free up un-sent records in tx_list. First, free
Vakul Garga42055e2018-09-21 09:46:13 +05302045 * the partially sent record, if any, at the head of tx_list.
2046 */
2047 if (tls_ctx->partially_sent_record) {
2048 struct scatterlist *sg = tls_ctx->partially_sent_record;
2049
2050 while (1) {
2051 put_page(sg_page(sg));
2052 sk_mem_uncharge(sk, sg->length);
2053
2054 if (sg_is_last(sg))
2055 break;
2056 sg++;
2057 }
2058
2059 tls_ctx->partially_sent_record = NULL;
2060
Vakul Garg9932a292018-09-24 15:35:56 +05302061 rec = list_first_entry(&ctx->tx_list,
Vakul Garga42055e2018-09-21 09:46:13 +05302062 struct tls_rec, list);
2063 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02002064 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +05302065 kfree(rec);
2066 }
2067
Vakul Garg9932a292018-09-24 15:35:56 +05302068 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
Vakul Garga42055e2018-09-21 09:46:13 +05302069 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02002070 sk_msg_free(sk, &rec->msg_encrypted);
2071 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +05302072 kfree(rec);
2073 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002074
Vakul Garg201876b2018-07-24 16:54:27 +05302075 crypto_free_aead(ctx->aead_send);
Vakul Gargc7749732018-09-25 20:21:51 +05302076 tls_free_open_rec(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002077
2078 kfree(ctx);
2079}
2080
Boris Pismenny39f56e12018-07-13 14:33:41 +03002081void tls_sw_release_resources_rx(struct sock *sk)
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002082{
2083 struct tls_context *tls_ctx = tls_get_ctx(sk);
2084 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2085
Dave Watsonc46234e2018-03-22 10:10:35 -07002086 if (ctx->aead_recv) {
Vakul Garg201876b2018-07-24 16:54:27 +05302087 kfree_skb(ctx->recv_pkt);
2088 ctx->recv_pkt = NULL;
Vakul Garg692d7b52019-01-16 10:40:16 +00002089 skb_queue_purge(&ctx->rx_list);
Dave Watsonc46234e2018-03-22 10:10:35 -07002090 crypto_free_aead(ctx->aead_recv);
2091 strp_stop(&ctx->strp);
2092 write_lock_bh(&sk->sk_callback_lock);
2093 sk->sk_data_ready = ctx->saved_data_ready;
2094 write_unlock_bh(&sk->sk_callback_lock);
2095 release_sock(sk);
2096 strp_done(&ctx->strp);
2097 lock_sock(sk);
2098 }
Boris Pismenny39f56e12018-07-13 14:33:41 +03002099}
2100
2101void tls_sw_free_resources_rx(struct sock *sk)
2102{
2103 struct tls_context *tls_ctx = tls_get_ctx(sk);
2104 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2105
2106 tls_sw_release_resources_rx(sk);
Dave Watson3c4d7552017-06-14 11:37:39 -07002107
Dave Watson3c4d7552017-06-14 11:37:39 -07002108 kfree(ctx);
2109}
2110
Vakul Garg9932a292018-09-24 15:35:56 +05302111/* The work handler to transmit the encrypted records in tx_list */
Vakul Garga42055e2018-09-21 09:46:13 +05302112static void tx_work_handler(struct work_struct *work)
2113{
2114 struct delayed_work *delayed_work = to_delayed_work(work);
2115 struct tx_work *tx_work = container_of(delayed_work,
2116 struct tx_work, work);
2117 struct sock *sk = tx_work->sk;
2118 struct tls_context *tls_ctx = tls_get_ctx(sk);
2119 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2120
2121 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2122 return;
2123
2124 lock_sock(sk);
2125 tls_tx_records(sk, -1);
2126 release_sock(sk);
2127}
2128
Boris Pismenny7463d3a2019-02-27 17:38:04 +02002129void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2130{
2131 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2132
2133 /* Schedule the transmission if tx list is ready */
2134 if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
2135 /* Schedule the transmission */
2136 if (!test_and_set_bit(BIT_TX_SCHEDULED,
2137 &tx_ctx->tx_bitmask))
2138 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2139 }
2140}
2141
Dave Watsonc46234e2018-03-22 10:10:35 -07002142int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
Dave Watson3c4d7552017-06-14 11:37:39 -07002143{
Vakul Garg4509de12019-02-14 07:11:35 +00002144 struct tls_context *tls_ctx = tls_get_ctx(sk);
2145 struct tls_prot_info *prot = &tls_ctx->prot_info;
Dave Watson3c4d7552017-06-14 11:37:39 -07002146 struct tls_crypto_info *crypto_info;
2147 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002148 struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002149 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2150 struct tls_sw_context_rx *sw_ctx_rx = NULL;
Dave Watsonc46234e2018-03-22 10:10:35 -07002151 struct cipher_context *cctx;
2152 struct crypto_aead **aead;
2153 struct strp_callbacks cb;
Dave Watson3c4d7552017-06-14 11:37:39 -07002154 u16 nonce_size, tag_size, iv_size, rec_seq_size;
Vakul Garg692d7b52019-01-16 10:40:16 +00002155 struct crypto_tfm *tfm;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002156 char *iv, *rec_seq, *key, *salt;
2157 size_t keysize;
Dave Watson3c4d7552017-06-14 11:37:39 -07002158 int rc = 0;
2159
2160 if (!ctx) {
2161 rc = -EINVAL;
2162 goto out;
2163 }
2164
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002165 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03002166 if (!ctx->priv_ctx_tx) {
2167 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2168 if (!sw_ctx_tx) {
2169 rc = -ENOMEM;
2170 goto out;
2171 }
2172 ctx->priv_ctx_tx = sw_ctx_tx;
2173 } else {
2174 sw_ctx_tx =
2175 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
Dave Watsonc46234e2018-03-22 10:10:35 -07002176 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002177 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03002178 if (!ctx->priv_ctx_rx) {
2179 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2180 if (!sw_ctx_rx) {
2181 rc = -ENOMEM;
2182 goto out;
2183 }
2184 ctx->priv_ctx_rx = sw_ctx_rx;
2185 } else {
2186 sw_ctx_rx =
2187 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002188 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002189 }
2190
Dave Watsonc46234e2018-03-22 10:10:35 -07002191 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03002192 crypto_init_wait(&sw_ctx_tx->async_wait);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02002193 crypto_info = &ctx->crypto_send.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07002194 cctx = &ctx->tx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002195 aead = &sw_ctx_tx->aead_send;
Vakul Garg9932a292018-09-24 15:35:56 +05302196 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
Vakul Garga42055e2018-09-21 09:46:13 +05302197 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2198 sw_ctx_tx->tx_work.sk = sk;
Dave Watsonc46234e2018-03-22 10:10:35 -07002199 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03002200 crypto_init_wait(&sw_ctx_rx->async_wait);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02002201 crypto_info = &ctx->crypto_recv.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07002202 cctx = &ctx->rx;
Vakul Garg692d7b52019-01-16 10:40:16 +00002203 skb_queue_head_init(&sw_ctx_rx->rx_list);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002204 aead = &sw_ctx_rx->aead_recv;
Dave Watsonc46234e2018-03-22 10:10:35 -07002205 }
2206
Dave Watson3c4d7552017-06-14 11:37:39 -07002207 switch (crypto_info->cipher_type) {
2208 case TLS_CIPHER_AES_GCM_128: {
2209 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2210 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2211 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2212 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
2213 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2214 rec_seq =
2215 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
2216 gcm_128_info =
2217 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002218 keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2219 key = gcm_128_info->key;
2220 salt = gcm_128_info->salt;
2221 break;
2222 }
2223 case TLS_CIPHER_AES_GCM_256: {
2224 nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2225 tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2226 iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2227 iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
2228 rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2229 rec_seq =
2230 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
2231 gcm_256_info =
2232 (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
2233 keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2234 key = gcm_256_info->key;
2235 salt = gcm_256_info->salt;
Dave Watson3c4d7552017-06-14 11:37:39 -07002236 break;
2237 }
2238 default:
2239 rc = -EINVAL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002240 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07002241 }
2242
Kees Cookb16520f2018-04-10 17:52:34 -07002243 /* Sanity-check the IV size for stack allocations. */
Kees Cook3463e512018-06-25 16:55:05 -07002244 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
Kees Cookb16520f2018-04-10 17:52:34 -07002245 rc = -EINVAL;
2246 goto free_priv;
2247 }
2248
Dave Watson130b3922019-01-30 21:58:31 +00002249 if (crypto_info->version == TLS_1_3_VERSION) {
2250 nonce_size = 0;
Vakul Garg4509de12019-02-14 07:11:35 +00002251 prot->aad_size = TLS_HEADER_SIZE;
2252 prot->tail_size = 1;
Dave Watson130b3922019-01-30 21:58:31 +00002253 } else {
Vakul Garg4509de12019-02-14 07:11:35 +00002254 prot->aad_size = TLS_AAD_SPACE_SIZE;
2255 prot->tail_size = 0;
Dave Watson130b3922019-01-30 21:58:31 +00002256 }
2257
Vakul Garg4509de12019-02-14 07:11:35 +00002258 prot->version = crypto_info->version;
2259 prot->cipher_type = crypto_info->cipher_type;
2260 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2261 prot->tag_size = tag_size;
2262 prot->overhead_size = prot->prepend_size +
2263 prot->tag_size + prot->tail_size;
2264 prot->iv_size = iv_size;
Dave Watsonc46234e2018-03-22 10:10:35 -07002265 cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
2266 GFP_KERNEL);
2267 if (!cctx->iv) {
Dave Watson3c4d7552017-06-14 11:37:39 -07002268 rc = -ENOMEM;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002269 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07002270 }
Dave Watsonfb99bce2019-01-30 21:58:05 +00002271 /* Note: 128 & 256 bit salt are the same size */
2272 memcpy(cctx->iv, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
Dave Watsonc46234e2018-03-22 10:10:35 -07002273 memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
Vakul Garg4509de12019-02-14 07:11:35 +00002274 prot->rec_seq_size = rec_seq_size;
zhong jiang969d5092018-08-01 00:50:24 +08002275 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
Dave Watsonc46234e2018-03-22 10:10:35 -07002276 if (!cctx->rec_seq) {
Dave Watson3c4d7552017-06-14 11:37:39 -07002277 rc = -ENOMEM;
2278 goto free_iv;
2279 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002280
Dave Watsonc46234e2018-03-22 10:10:35 -07002281 if (!*aead) {
2282 *aead = crypto_alloc_aead("gcm(aes)", 0, 0);
2283 if (IS_ERR(*aead)) {
2284 rc = PTR_ERR(*aead);
2285 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002286 goto free_rec_seq;
2287 }
2288 }
2289
2290 ctx->push_pending_record = tls_sw_push_pending_record;
2291
Dave Watsonfb99bce2019-01-30 21:58:05 +00002292 rc = crypto_aead_setkey(*aead, key, keysize);
2293
Dave Watson3c4d7552017-06-14 11:37:39 -07002294 if (rc)
2295 goto free_aead;
2296
Vakul Garg4509de12019-02-14 07:11:35 +00002297 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
Dave Watsonc46234e2018-03-22 10:10:35 -07002298 if (rc)
2299 goto free_aead;
2300
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002301 if (sw_ctx_rx) {
Vakul Garg692d7b52019-01-16 10:40:16 +00002302 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
Vakul Garg8497ded2019-02-09 07:53:28 +00002303
2304 if (crypto_info->version == TLS_1_3_VERSION)
2305 sw_ctx_rx->async_capable = false;
2306 else
2307 sw_ctx_rx->async_capable =
2308 tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
Vakul Garg692d7b52019-01-16 10:40:16 +00002309
Dave Watsonc46234e2018-03-22 10:10:35 -07002310 /* Set up strparser */
2311 memset(&cb, 0, sizeof(cb));
2312 cb.rcv_msg = tls_queue;
2313 cb.parse_msg = tls_read_size;
2314
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002315 strp_init(&sw_ctx_rx->strp, sk, &cb);
Dave Watsonc46234e2018-03-22 10:10:35 -07002316
2317 write_lock_bh(&sk->sk_callback_lock);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002318 sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
Dave Watsonc46234e2018-03-22 10:10:35 -07002319 sk->sk_data_ready = tls_data_ready;
2320 write_unlock_bh(&sk->sk_callback_lock);
2321
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002322 strp_check_rcv(&sw_ctx_rx->strp);
Dave Watsonc46234e2018-03-22 10:10:35 -07002323 }
2324
2325 goto out;
Dave Watson3c4d7552017-06-14 11:37:39 -07002326
2327free_aead:
Dave Watsonc46234e2018-03-22 10:10:35 -07002328 crypto_free_aead(*aead);
2329 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002330free_rec_seq:
Dave Watsonc46234e2018-03-22 10:10:35 -07002331 kfree(cctx->rec_seq);
2332 cctx->rec_seq = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002333free_iv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002334 kfree(cctx->iv);
2335 cctx->iv = NULL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002336free_priv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002337 if (tx) {
2338 kfree(ctx->priv_ctx_tx);
2339 ctx->priv_ctx_tx = NULL;
2340 } else {
2341 kfree(ctx->priv_ctx_rx);
2342 ctx->priv_ctx_rx = NULL;
2343 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002344out:
2345 return rc;
2346}
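/* For reference, a minimal userspace sketch (not part of this file) of the
 * setup that leads into tls_set_sw_offload(), assuming a connected TCP
 * socket 'fd' and keys already derived by a userspace TLS handshake:
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *
 *	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *	memcpy(ci.rec_seq, seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));
 *
 * TLS_TX reaches this function with tx == 1, TLS_RX with tx == 0.
 */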