/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/splice.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

noinline void tls_err_abort(struct sock *sk, int err)
{
	WARN_ON_ONCE(err >= 0);
	/* sk->sk_err should contain a positive error code. */
	sk->sk_err = -err;
	sk_error_report(sk);
}

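/* Count how many scatterlist entries are needed to cover @len bytes of @skb
 * starting at @offset, walking the linear area, the page frags and any frag
 * list. Recursion over nested frag lists is capped to avoid stack abuse.
 */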
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}

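/* TLS 1.3 hides the real content type behind zero padding at the tail of the
 * plaintext. Walk backwards through the decrypted record until the first
 * non-zero byte (the content type), stash it in the rx context and return
 * the number of padding bytes to trim, or -EBADMSG if none is found.
 */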
static int padding_length(struct tls_sw_context_rx *ctx,
			  struct tls_prot_info *prot, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		char content_type = 0;
		int err;
		int back = 17;

		while (content_type == 0) {
			if (back > rxm->full_len - prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb,
					    rxm->offset + rxm->full_len - back,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			back++;
		}
		ctx->control = content_type;
	}
	return sub;
}

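/* Async decrypt completion handler. Propagates crypto errors to the socket,
 * strips TLS 1.3 padding and the record header from the strparser message,
 * releases any out-of-place destination pages, and wakes a waiter once the
 * last pending decrypt has completed.
 */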
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct scatterlist *sgout = aead_req->dst;
	struct scatterlist *sgin = aead_req->src;
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	prot = &tls_ctx->prot_info;

	/* Propagate if there was an err */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(skb->sk),
				      LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(skb->sk, err);
	} else {
		struct strp_msg *rxm = strp_msg(skb);
		int pad;

		pad = padding_length(ctx, prot, skb);
		if (pad < 0) {
			ctx->async_wait.err = pad;
			tls_err_abort(skb->sk, pad);
		} else {
			rxm->full_len -= pad;
			rxm->offset += prot->prepend_size;
			rxm->full_len -= prot->overhead_size;
		}
	}

	/* After using skb->sk to propagate sk through crypto async callback
	 * we need to NULL it again.
	 */
	skb->sk = NULL;

	/* Free the destination pages if skb was not decrypted in place */
	if (sgout != sgin) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	spin_lock_bh(&ctx->decrypt_compl_lock);
	pending = atomic_dec_return(&ctx->decrypt_pending);

	if (!pending && ctx->async_notify)
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->decrypt_compl_lock);
}

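/* Submit one AEAD decrypt for a received record. In async mode the socket is
 * threaded through skb->sk for the completion handler and -EINPROGRESS is
 * returned to the caller; otherwise the function waits for the crypto layer
 * to finish and returns its result.
 */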
static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (async) {
		/* Using skb->sk to push sk through to crypto async callback
		 * handler. This allows propagating errors up to the socket
		 * if needed. It _must_ be cleared in the async handler
		 * before consume_skb is called. We _know_ skb->sk is NULL
		 * because it is a clone from strparser.
		 */
		skb->sk = sk;
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, skb);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (async)
			return ret;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}

	if (async)
		atomic_dec(&ctx->decrypt_pending);

	return ret;
}

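/* Shrink both halves of the open record so that the plaintext message holds
 * at most @target_size bytes and the encrypted message keeps room for the
 * per-record crypto overhead on top of that.
 */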
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

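/* Grow the plaintext message up to @required bytes by taking extra page
 * references from the already-allocated encrypted message, so plaintext and
 * ciphertext can share pages at matching offsets.
 */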
static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has enough required room (ensured by caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

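/* Allocate and initialize a fresh TLS record: the plaintext and encrypted
 * sk_msgs plus the two-entry AAD-prefixed scatterlists used as AEAD input
 * and output.
 */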
static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}

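/* Transmit encrypted records to the TCP layer: finish any partially sent
 * record first, then push every record at the head of tx_list whose
 * encryption has completed, freeing each record once it is fully sent.
 */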
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, -EBADMSG);

	return rc;
}

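/* Async encrypt completion handler. Restores the record header bytes hidden
 * from the cipher, records any error on the socket, marks the record ready
 * for transmission and, if it sits at the head of tx_list, schedules the tx
 * work to actually send it.
 */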
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct sock *sk = req->data;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);
	msg_en = &rec->msg_encrypted;

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = -sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	spin_lock_bh(&ctx->encrypt_compl_lock);
	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && ctx->async_notify)
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->encrypt_compl_lock);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}

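/* Set up the IV (constant leading byte for CCM ciphers, then salt/IV XORed
 * with the record sequence number), queue the record on tx_list and submit
 * the AEAD encrypt. On synchronous completion the record is marked tx_ready;
 * -EINPROGRESS means the async callback will finish the job.
 */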
static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, sk);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}

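/* Split the open record in two: plaintext covered by apply_bytes stays in
 * @from, the remaining scatterlist entries move to a freshly allocated record
 * returned through @to, so that only part of the buffered data goes out as
 * one TLS record.
 */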
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}

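/* Undo a tls_split_open_record(): fold the plaintext of @from back into @to
 * (re-joining a scatterlist entry that was split within a page), restore the
 * original end markers, take over the encrypted buffer and free @from.
 */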
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}

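/* Close the currently open record and hand it to the crypto layer: split it
 * if only part of the plaintext should go out now, append the TLS 1.3
 * content type, chain the plaintext/ciphertext scatterlists behind the AAD,
 * build the record header and start encryption, then transmit whatever
 * records are already ready.
 */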
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, orig_end;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message.  No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, record_type, prot);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, -EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}

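/* Run the plaintext of the open record through the socket's BPF msg verdict
 * program (if any) and act on the result: PASS pushes the record out as
 * usual, REDIRECT hands the data to another socket, DROP discards it. The
 * caller's copied count is adjusted to match what actually went out.
 */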
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}

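/* sendmsg() for a SW-TLS socket: copy (or zero-copy map) the caller's data
 * into the open record's plaintext and close the record through the BPF
 * verdict / encryption path whenever it fills up or MSG_MORE is absent.
 * For async ciphers, wait for pending zero-copy encryptions and transmit
 * whatever has completed before returning.
 */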
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;
	int pending;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		spin_lock_bh(&ctx->encrypt_compl_lock);
		ctx->async_notify = true;

		pending = atomic_read(&ctx->encrypt_pending);
		spin_unlock_bh(&ctx->encrypt_compl_lock);
		if (pending)
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		/* There can be no concurrent accesses, since we have no
		 * pending encrypt operations
		 */
		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return copied > 0 ? copied : ret;
}

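/* Common worker for sendpage(): append the given page fragment to the open
 * record's plaintext without copying, charge the socket memory accounting,
 * and close records through the BPF verdict / encryption path as they fill
 * up or when MSG_SENDPAGE_NOTLAST is not set.
 */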
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
			      int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct sk_msg *msg_pl;
	struct tls_rec *rec;
	int num_async = 0;
	ssize_t copied = 0;
	bool full_record;
	int record_room;
	int ret = 0;
	bool eor;

	eor = !(flags & MSG_SENDPAGE_NOTLAST);
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		msg_pl = &rec->msg_plaintext;

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + copy + prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}

		sk_msg_page_add(msg_pl, page, copy, offset);
		sk_mem_charge(sk, copy);

		offset += copy;
		size -= copy;
		copied += copy;

		tls_ctx->pending_open_record_frags = true;
		if (full_record || eor || sk_msg_full(msg_pl)) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied, flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto sendpage_end;
				}
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}

		if (ctx->open_rec)
			goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	ret = sk_stream_error(sk, flags, ret);
	return copied > 0 ? copied : ret;
}

int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags)
{
	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
		      MSG_NO_SHARED_FRAGS))
		return -EOPNOTSUPP;

	return tls_sw_do_sendpage(sk, page, offset, size, flags);
}

int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}

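/* Wait until the strparser has a parsed record (or the psock has queued
 * data), honouring socket errors, shutdown, non-blocking mode and signals.
 * Returns the record's skb, or NULL if the caller should stop reading.
 */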
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
				     bool nonblock, long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			__strp_unpause(&ctx->strp);
			if (ctx->recv_pkt)
				return ctx->recv_pkt;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if (nonblock || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo,
			      ctx->recv_pkt != skb ||
			      !sk_psock_queue_empty(psock),
			      &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001349static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
1350 int length, int *pages_used,
1351 unsigned int *size_used,
1352 struct scatterlist *to,
1353 int to_max_pages)
1354{
1355 int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1356 struct page *pages[MAX_SKB_FRAGS];
1357 unsigned int size = *size_used;
1358 ssize_t copied, use;
1359 size_t offset;
1360
1361 while (length > 0) {
1362 i = 0;
1363 maxpages = to_max_pages - num_elem;
1364 if (maxpages == 0) {
1365 rc = -EFAULT;
1366 goto out;
1367 }
1368 copied = iov_iter_get_pages(from, pages,
1369 length,
1370 maxpages, &offset);
1371 if (copied <= 0) {
1372 rc = -EFAULT;
1373 goto out;
1374 }
1375
1376 iov_iter_advance(from, copied);
1377
1378 length -= copied;
1379 size += copied;
1380 while (copied) {
1381 use = min_t(int, copied, PAGE_SIZE - offset);
1382
1383 sg_set_page(&to[num_elem],
1384 pages[i], use, offset);
1385 sg_unmark_end(&to[num_elem]);
1386 /* We do not uncharge memory from this API */
1387
1388 offset = 0;
1389 copied -= use;
1390
1391 i++;
1392 num_elem++;
1393 }
1394 }
1395 /* Mark the end in the last sg entry if newly added */
1396 if (num_elem > *pages_used)
1397 sg_mark_end(&to[num_elem - 1]);
1398out:
1399 if (rc)
1400 iov_iter_revert(from, size - *size_used);
1401 *size_used = size;
1402 *pages_used = num_elem;
1403
1404 return rc;
1405}
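/* Worked example (a sketch; assumes 4 KiB pages and that iov_iter_get_pages()
 * pins both pages in one call): mapping 6000 bytes that start 100 bytes into
 * a page yields two scatterlist entries, sg[0] = (page0, len 3996, offset 100)
 * and sg[1] = (page1, len 2004, offset 0), with sg_mark_end() set on sg[1].
 */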
1406
Vakul Garg0b243d02018-08-10 20:46:41 +05301407/* This function decrypts the input skb into either out_iov, out_sg, or
 1408 * the skb's own buffers. The input parameter 'zc' indicates if
1409 * zero-copy mode needs to be tried or not. With zero-copy mode, either
1410 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
1411 * NULL, then the decryption happens inside skb buffers itself, i.e.
1412 * zero-copy gets disabled and 'zc' is updated.
1413 */
1414
1415static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1416 struct iov_iter *out_iov,
1417 struct scatterlist *out_sg,
Vakul Garg692d7b52019-01-16 10:40:16 +00001418 int *chunk, bool *zc, bool async)
Vakul Garg0b243d02018-08-10 20:46:41 +05301419{
1420 struct tls_context *tls_ctx = tls_get_ctx(sk);
1421 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001422 struct tls_prot_info *prot = &tls_ctx->prot_info;
Vakul Garg0b243d02018-08-10 20:46:41 +05301423 struct strp_msg *rxm = strp_msg(skb);
1424 int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
1425 struct aead_request *aead_req;
1426 struct sk_buff *unused;
1427 u8 *aad, *iv, *mem = NULL;
1428 struct scatterlist *sgin = NULL;
1429 struct scatterlist *sgout = NULL;
Vakul Garg4509de12019-02-14 07:11:35 +00001430 const int data_len = rxm->full_len - prot->overhead_size +
1431 prot->tail_size;
Vakul Gargf295b3a2019-03-20 02:03:36 +00001432 int iv_offset = 0;
Vakul Garg0b243d02018-08-10 20:46:41 +05301433
1434 if (*zc && (out_iov || out_sg)) {
1435 if (out_iov)
1436 n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
1437 else
1438 n_sgout = sg_nents(out_sg);
Vakul Garg4509de12019-02-14 07:11:35 +00001439 n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1440 rxm->full_len - prot->prepend_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301441 } else {
1442 n_sgout = 0;
1443 *zc = false;
Doron Roberts-Kedes0927f712018-08-28 16:33:57 -07001444 n_sgin = skb_cow_data(skb, 0, &unused);
Vakul Garg0b243d02018-08-10 20:46:41 +05301445 }
1446
Vakul Garg0b243d02018-08-10 20:46:41 +05301447 if (n_sgin < 1)
1448 return -EBADMSG;
1449
1450 /* Increment to accommodate AAD */
1451 n_sgin = n_sgin + 1;
1452
1453 nsg = n_sgin + n_sgout;
1454
1455 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1456 mem_size = aead_size + (nsg * sizeof(struct scatterlist));
Vakul Garg4509de12019-02-14 07:11:35 +00001457 mem_size = mem_size + prot->aad_size;
Vakul Garg0b243d02018-08-10 20:46:41 +05301458 mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
1459
1460 /* Allocate a single block of memory which contains
1461 * aead_req || sgin[] || sgout[] || aad || iv.
1462 * This order achieves correct alignment for aead_req, sgin, sgout.
1463 */
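	/* For instance (sizes are illustrative, not guaranteed): a TLS 1.2
	 * AES-GCM-128 record decrypted in place has n_sgin = 2, n_sgout = 0,
	 * prot->aad_size = 13 and crypto_aead_ivsize() = 12, so the single
	 * allocation is aead_size + 2 * sizeof(struct scatterlist) + 13 + 12
	 * bytes, carved up in exactly that order below.
	 */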
1464 mem = kmalloc(mem_size, sk->sk_allocation);
1465 if (!mem)
1466 return -ENOMEM;
1467
1468 /* Segment the allocated memory */
1469 aead_req = (struct aead_request *)mem;
1470 sgin = (struct scatterlist *)(mem + aead_size);
1471 sgout = sgin + n_sgin;
1472 aad = (u8 *)(sgout + n_sgout);
Vakul Garg4509de12019-02-14 07:11:35 +00001473 iv = aad + prot->aad_size;
Vakul Garg0b243d02018-08-10 20:46:41 +05301474
Tianjia Zhang128cfb82021-09-28 14:28:43 +08001475	/* For CCM-based ciphers, the first byte of the nonce+iv is a constant */
1476 switch (prot->cipher_type) {
1477 case TLS_CIPHER_AES_CCM_128:
1478 iv[0] = TLS_AES_CCM_IV_B0_BYTE;
Vakul Gargf295b3a2019-03-20 02:03:36 +00001479 iv_offset = 1;
Tianjia Zhang128cfb82021-09-28 14:28:43 +08001480 break;
1481 case TLS_CIPHER_SM4_CCM:
1482 iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
1483 iv_offset = 1;
1484 break;
Vakul Gargf295b3a2019-03-20 02:03:36 +00001485 }
1486
Vakul Garg0b243d02018-08-10 20:46:41 +05301487 /* Prepare IV */
1488 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
Vakul Gargf295b3a2019-03-20 02:03:36 +00001489 iv + iv_offset + prot->salt_size,
Vakul Garg4509de12019-02-14 07:11:35 +00001490 prot->iv_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301491 if (err < 0) {
1492 kfree(mem);
1493 return err;
1494 }
Vadim Fedorenkoa6acbe62020-11-24 18:24:48 +03001495 if (prot->version == TLS_1_3_VERSION ||
1496 prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305)
Vakul Gargf295b3a2019-03-20 02:03:36 +00001497 memcpy(iv + iv_offset, tls_ctx->rx.iv,
1498 crypto_aead_ivsize(ctx->aead_recv));
Dave Watson130b3922019-01-30 21:58:31 +00001499 else
Vakul Gargf295b3a2019-03-20 02:03:36 +00001500 memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
Dave Watson130b3922019-01-30 21:58:31 +00001501
Vadim Fedorenko6942a282020-11-24 18:24:46 +03001502 xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq);
Vakul Garg0b243d02018-08-10 20:46:41 +05301503
1504 /* Prepare AAD */
Vakul Garg4509de12019-02-14 07:11:35 +00001505 tls_make_aad(aad, rxm->full_len - prot->overhead_size +
1506 prot->tail_size,
Vadim Fedorenko6942a282020-11-24 18:24:46 +03001507 tls_ctx->rx.rec_seq, ctx->control, prot);
Vakul Garg0b243d02018-08-10 20:46:41 +05301508
1509 /* Prepare sgin */
1510 sg_init_table(sgin, n_sgin);
Vakul Garg4509de12019-02-14 07:11:35 +00001511 sg_set_buf(&sgin[0], aad, prot->aad_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301512 err = skb_to_sgvec(skb, &sgin[1],
Vakul Garg4509de12019-02-14 07:11:35 +00001513 rxm->offset + prot->prepend_size,
1514 rxm->full_len - prot->prepend_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301515 if (err < 0) {
1516 kfree(mem);
1517 return err;
1518 }
1519
1520 if (n_sgout) {
1521 if (out_iov) {
1522 sg_init_table(sgout, n_sgout);
Vakul Garg4509de12019-02-14 07:11:35 +00001523 sg_set_buf(&sgout[0], aad, prot->aad_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301524
1525 *chunk = 0;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001526 err = tls_setup_from_iter(sk, out_iov, data_len,
1527 &pages, chunk, &sgout[1],
1528 (n_sgout - 1));
Vakul Garg0b243d02018-08-10 20:46:41 +05301529 if (err < 0)
1530 goto fallback_to_reg_recv;
1531 } else if (out_sg) {
1532 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1533 } else {
1534 goto fallback_to_reg_recv;
1535 }
1536 } else {
1537fallback_to_reg_recv:
1538 sgout = sgin;
1539 pages = 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001540 *chunk = data_len;
Vakul Garg0b243d02018-08-10 20:46:41 +05301541 *zc = false;
1542 }
1543
1544 /* Prepare and submit AEAD request */
Vakul Garg94524d82018-08-29 15:26:55 +05301545 err = tls_do_decryption(sk, skb, sgin, sgout, iv,
Vakul Garg692d7b52019-01-16 10:40:16 +00001546 data_len, aead_req, async);
Vakul Garg94524d82018-08-29 15:26:55 +05301547 if (err == -EINPROGRESS)
1548 return err;
Vakul Garg0b243d02018-08-10 20:46:41 +05301549
1550 /* Release the pages in case iov was mapped to pages */
1551 for (; pages > 0; pages--)
1552 put_page(sg_page(&sgout[pages]));
1553
1554 kfree(mem);
1555 return err;
1556}
1557
Boris Pismennydafb67f2018-07-13 14:33:40 +03001558static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
Vakul Garg692d7b52019-01-16 10:40:16 +00001559 struct iov_iter *dest, int *chunk, bool *zc,
1560 bool async)
Boris Pismennydafb67f2018-07-13 14:33:40 +03001561{
1562 struct tls_context *tls_ctx = tls_get_ctx(sk);
1563 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001564 struct tls_prot_info *prot = &tls_ctx->prot_info;
Boris Pismennydafb67f2018-07-13 14:33:40 +03001565 struct strp_msg *rxm = strp_msg(skb);
Jakub Kicinskib53f4972019-05-09 16:14:07 -07001566 int pad, err = 0;
Boris Pismennydafb67f2018-07-13 14:33:40 +03001567
Boris Pismenny4799ac82018-07-13 14:33:43 +03001568 if (!ctx->decrypted) {
Jakub Kicinskib9d8fec2019-06-03 15:17:01 -07001569 if (tls_ctx->rx_conf == TLS_HW) {
Jakub Kicinski4de30a82019-10-06 21:09:30 -07001570 err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
Jakub Kicinskib9d8fec2019-06-03 15:17:01 -07001571 if (err < 0)
1572 return err;
1573 }
Jakub Kicinskibe2fbc12019-09-02 21:31:05 -07001574
Boris Pismennyd069b782019-02-27 17:38:06 +02001575 /* Still not decrypted after tls_device */
1576 if (!ctx->decrypted) {
1577 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
1578 async);
1579 if (err < 0) {
1580 if (err == -EINPROGRESS)
Jakub Kicinskifb0f8862019-06-03 15:17:05 -07001581 tls_advance_record_sn(sk, prot,
1582 &tls_ctx->rx);
Jakub Kicinski5c5d22a2020-01-10 04:36:55 -08001583 else if (err == -EBADMSG)
1584 TLS_INC_STATS(sock_net(sk),
1585 LINUX_MIB_TLSDECRYPTERROR);
Boris Pismennyd069b782019-02-27 17:38:06 +02001586 return err;
1587 }
Jakub Kicinskic43ac972019-03-28 14:54:43 -07001588 } else {
1589 *zc = false;
Vakul Garg94524d82018-08-29 15:26:55 +05301590 }
Dave Watson130b3922019-01-30 21:58:31 +00001591
Jakub Kicinskib53f4972019-05-09 16:14:07 -07001592 pad = padding_length(ctx, prot, skb);
1593 if (pad < 0)
1594 return pad;
1595
1596 rxm->full_len -= pad;
Vakul Garg4509de12019-02-14 07:11:35 +00001597 rxm->offset += prot->prepend_size;
1598 rxm->full_len -= prot->overhead_size;
Jakub Kicinskifb0f8862019-06-03 15:17:05 -07001599 tls_advance_record_sn(sk, prot, &tls_ctx->rx);
Jakub Kicinskibc76e5b2019-10-06 21:09:32 -07001600 ctx->decrypted = 1;
Dave Watsonfedf2012019-01-30 21:58:24 +00001601 ctx->saved_data_ready(sk);
Boris Pismenny4799ac82018-07-13 14:33:43 +03001602 } else {
1603 *zc = false;
1604 }
Boris Pismennydafb67f2018-07-13 14:33:40 +03001605
Boris Pismennydafb67f2018-07-13 14:33:40 +03001606 return err;
1607}
1608
1609int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1610 struct scatterlist *sgout)
Dave Watsonc46234e2018-03-22 10:10:35 -07001611{
Vakul Garg0b243d02018-08-10 20:46:41 +05301612 bool zc = true;
1613 int chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001614
Vakul Garg692d7b52019-01-16 10:40:16 +00001615 return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
Dave Watsonc46234e2018-03-22 10:10:35 -07001616}
1617
1618static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
1619 unsigned int len)
1620{
1621 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001622 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001623
Vakul Garg94524d82018-08-29 15:26:55 +05301624 if (skb) {
1625 struct strp_msg *rxm = strp_msg(skb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001626
Vakul Garg94524d82018-08-29 15:26:55 +05301627 if (len < rxm->full_len) {
1628 rxm->offset += len;
1629 rxm->full_len -= len;
1630 return false;
1631 }
Vakul Garga88c26f2019-03-21 11:59:57 +00001632 consume_skb(skb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001633 }
1634
1635 /* Finished with message */
1636 ctx->recv_pkt = NULL;
Doron Roberts-Kedes7170e602018-06-06 09:33:28 -07001637 __strp_unpause(&ctx->strp);
Dave Watsonc46234e2018-03-22 10:10:35 -07001638
1639 return true;
1640}
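/* For example: if the current record holds 1000 bytes and only 400 were
 * consumed, tls_sw_advance_skb() just bumps rxm->offset by 400, shrinks
 * rxm->full_len to 600 and returns false, leaving the skb as ctx->recv_pkt;
 * only once the whole record is consumed is the skb freed, recv_pkt cleared
 * and the strparser unpaused.
 */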
1641
Vakul Garg692d7b52019-01-16 10:40:16 +00001642/* This function traverses the rx_list in the tls receive context to copy the
Vakul Garg2b794c42019-02-23 08:42:37 +00001643 * decrypted records into the buffer provided by the caller when zero copy is not
Vakul Garg692d7b52019-01-16 10:40:16 +00001644 * true. Further, the records are removed from the rx_list if it is not a peek
1645 * case and the record has been consumed completely.
1646 */
1647static int process_rx_list(struct tls_sw_context_rx *ctx,
1648 struct msghdr *msg,
Vakul Garg2b794c42019-02-23 08:42:37 +00001649 u8 *control,
1650 bool *cmsg,
Vakul Garg692d7b52019-01-16 10:40:16 +00001651 size_t skip,
1652 size_t len,
1653 bool zc,
1654 bool is_peek)
1655{
1656 struct sk_buff *skb = skb_peek(&ctx->rx_list);
Vakul Garg2b794c42019-02-23 08:42:37 +00001657 u8 ctrl = *control;
1658 u8 msgc = *cmsg;
1659 struct tls_msg *tlm;
Vakul Garg692d7b52019-01-16 10:40:16 +00001660 ssize_t copied = 0;
1661
Vakul Garg2b794c42019-02-23 08:42:37 +00001662 /* Set the record type in 'control' if caller didn't pass it */
1663 if (!ctrl && skb) {
1664 tlm = tls_msg(skb);
1665 ctrl = tlm->control;
1666 }
1667
Vakul Garg692d7b52019-01-16 10:40:16 +00001668 while (skip && skb) {
1669 struct strp_msg *rxm = strp_msg(skb);
Vakul Garg2b794c42019-02-23 08:42:37 +00001670 tlm = tls_msg(skb);
1671
1672 /* Cannot process a record of different type */
1673 if (ctrl != tlm->control)
1674 return 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001675
1676 if (skip < rxm->full_len)
1677 break;
1678
1679 skip = skip - rxm->full_len;
1680 skb = skb_peek_next(skb, &ctx->rx_list);
1681 }
1682
1683 while (len && skb) {
1684 struct sk_buff *next_skb;
1685 struct strp_msg *rxm = strp_msg(skb);
1686 int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1687
Vakul Garg2b794c42019-02-23 08:42:37 +00001688 tlm = tls_msg(skb);
1689
1690 /* Cannot process a record of different type */
1691 if (ctrl != tlm->control)
1692 return 0;
1693
1694 /* Set record type if not already done. For a non-data record,
1695 * do not proceed if record type could not be copied.
1696 */
1697 if (!msgc) {
1698 int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1699 sizeof(ctrl), &ctrl);
1700 msgc = true;
1701 if (ctrl != TLS_RECORD_TYPE_DATA) {
1702 if (cerr || msg->msg_flags & MSG_CTRUNC)
1703 return -EIO;
1704
1705 *cmsg = msgc;
1706 }
1707 }
1708
Vakul Garg692d7b52019-01-16 10:40:16 +00001709 if (!zc || (rxm->full_len - skip) > len) {
1710 int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1711 msg, chunk);
1712 if (err < 0)
1713 return err;
1714 }
1715
1716 len = len - chunk;
1717 copied = copied + chunk;
1718
 1719		/* Consume the data from the record in the non-peek case */
1720 if (!is_peek) {
1721 rxm->offset = rxm->offset + chunk;
1722 rxm->full_len = rxm->full_len - chunk;
1723
1724 /* Return if there is unconsumed data in the record */
1725 if (rxm->full_len - skip)
1726 break;
1727 }
1728
1729 /* The remaining skip-bytes must lie in 1st record in rx_list.
1730 * So from the 2nd record, 'skip' should be 0.
1731 */
1732 skip = 0;
1733
1734 if (msg)
1735 msg->msg_flags |= MSG_EOR;
1736
1737 next_skb = skb_peek_next(skb, &ctx->rx_list);
1738
1739 if (!is_peek) {
1740 skb_unlink(skb, &ctx->rx_list);
Vakul Garga88c26f2019-03-21 11:59:57 +00001741 consume_skb(skb);
Vakul Garg692d7b52019-01-16 10:40:16 +00001742 }
1743
1744 skb = next_skb;
1745 }
1746
Vakul Garg2b794c42019-02-23 08:42:37 +00001747 *control = ctrl;
Vakul Garg692d7b52019-01-16 10:40:16 +00001748 return copied;
1749}
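/* For example: with two 1000-byte data records queued on rx_list, a call with
 * skip = 1500 steps over the first record entirely and starts copying 500
 * bytes into the second one; in the non-peek case the copied bytes are
 * trimmed from that record, and a record is only unlinked and freed once it
 * has been drained completely.
 */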
1750
Dave Watsonc46234e2018-03-22 10:10:35 -07001751int tls_sw_recvmsg(struct sock *sk,
1752 struct msghdr *msg,
1753 size_t len,
1754 int nonblock,
1755 int flags,
1756 int *addr_len)
1757{
1758 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001759 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001760 struct tls_prot_info *prot = &tls_ctx->prot_info;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001761 struct sk_psock *psock;
Vakul Garg692d7b52019-01-16 10:40:16 +00001762 unsigned char control = 0;
1763 ssize_t decrypted = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001764 struct strp_msg *rxm;
Vakul Garg2b794c42019-02-23 08:42:37 +00001765 struct tls_msg *tlm;
Dave Watsonc46234e2018-03-22 10:10:35 -07001766 struct sk_buff *skb;
1767 ssize_t copied = 0;
1768 bool cmsg = false;
Daniel Borkmann06030db2018-06-15 03:07:46 +02001769 int target, err = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001770 long timeo;
David Howells00e23702018-10-22 13:07:28 +01001771 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
Vakul Garg692d7b52019-01-16 10:40:16 +00001772 bool is_peek = flags & MSG_PEEK;
John Fastabende91de6a2020-05-29 16:06:59 -07001773 bool bpf_strp_enabled;
Vakul Garg94524d82018-08-29 15:26:55 +05301774 int num_async = 0;
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05301775 int pending;
Dave Watsonc46234e2018-03-22 10:10:35 -07001776
1777 flags |= nonblock;
1778
1779 if (unlikely(flags & MSG_ERRQUEUE))
1780 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1781
John Fastabendd3b18ad32018-10-13 02:46:01 +02001782 psock = sk_psock_get(sk);
Dave Watsonc46234e2018-03-22 10:10:35 -07001783 lock_sock(sk);
John Fastabende91de6a2020-05-29 16:06:59 -07001784 bpf_strp_enabled = sk_psock_strp_enabled(psock);
Dave Watsonc46234e2018-03-22 10:10:35 -07001785
Vakul Garg692d7b52019-01-16 10:40:16 +00001786	/* Process pending decrypted records; these must take the non-zero-copy path */
Vakul Garg2b794c42019-02-23 08:42:37 +00001787 err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
1788 is_peek);
Vakul Garg692d7b52019-01-16 10:40:16 +00001789 if (err < 0) {
1790 tls_err_abort(sk, err);
1791 goto end;
1792 } else {
1793 copied = err;
1794 }
1795
Jakub Kicinski46a16952019-05-24 10:34:30 -07001796 if (len <= copied)
Vakul Garg692d7b52019-01-16 10:40:16 +00001797 goto recv_end;
Jakub Kicinski46a16952019-05-24 10:34:30 -07001798
1799 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1800 len = len - copied;
1801 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
Vakul Garg692d7b52019-01-16 10:40:16 +00001802
Jakub Kicinski04b25a52019-05-24 10:34:32 -07001803 while (len && (decrypted + copied < target || ctx->recv_pkt)) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001804 bool retain_skb = false;
Vakul Garg692d7b52019-01-16 10:40:16 +00001805 bool zc = false;
1806 int to_decrypt;
Dave Watsonc46234e2018-03-22 10:10:35 -07001807 int chunk = 0;
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001808 bool async_capable;
1809 bool async = false;
Dave Watsonc46234e2018-03-22 10:10:35 -07001810
Jim Ma974271e2021-05-14 11:11:02 +08001811 skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001812 if (!skb) {
1813 if (psock) {
Cong Wang2bc793e2021-03-30 19:32:33 -07001814 int ret = sk_msg_recvmsg(sk, psock, msg, len,
1815 flags);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001816
1817 if (ret > 0) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001818 decrypted += ret;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001819 len -= ret;
1820 continue;
1821 }
1822 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001823 goto recv_end;
Vakul Garg2b794c42019-02-23 08:42:37 +00001824 } else {
1825 tlm = tls_msg(skb);
1826 if (prot->version == TLS_1_3_VERSION)
1827 tlm->control = 0;
1828 else
1829 tlm->control = ctx->control;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001830 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001831
1832 rxm = strp_msg(skb);
Vakul Garg94524d82018-08-29 15:26:55 +05301833
Vakul Garg4509de12019-02-14 07:11:35 +00001834 to_decrypt = rxm->full_len - prot->overhead_size;
Dave Watsonfedf2012019-01-30 21:58:24 +00001835
1836 if (to_decrypt <= len && !is_kvec && !is_peek &&
Dave Watson130b3922019-01-30 21:58:31 +00001837 ctx->control == TLS_RECORD_TYPE_DATA &&
John Fastabende91de6a2020-05-29 16:06:59 -07001838 prot->version != TLS_1_3_VERSION &&
1839 !bpf_strp_enabled)
Dave Watsonfedf2012019-01-30 21:58:24 +00001840 zc = true;
1841
Vakul Gargc0ab4732019-02-11 11:31:05 +00001842 /* Do not use async mode if record is non-data */
John Fastabende91de6a2020-05-29 16:06:59 -07001843 if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001844 async_capable = ctx->async_capable;
Vakul Gargc0ab4732019-02-11 11:31:05 +00001845 else
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001846 async_capable = false;
Vakul Gargc0ab4732019-02-11 11:31:05 +00001847
Dave Watsonfedf2012019-01-30 21:58:24 +00001848 err = decrypt_skb_update(sk, skb, &msg->msg_iter,
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001849 &chunk, &zc, async_capable);
Dave Watsonfedf2012019-01-30 21:58:24 +00001850 if (err < 0 && err != -EINPROGRESS) {
Daniel Jordanda353fac2021-10-27 17:59:20 -04001851 tls_err_abort(sk, -EBADMSG);
Dave Watsonfedf2012019-01-30 21:58:24 +00001852 goto recv_end;
1853 }
1854
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001855 if (err == -EINPROGRESS) {
1856 async = true;
Dave Watsonfedf2012019-01-30 21:58:24 +00001857 num_async++;
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001858 } else if (prot->version == TLS_1_3_VERSION) {
Vakul Garg2b794c42019-02-23 08:42:37 +00001859 tlm->control = ctx->control;
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001860 }
Vakul Garg2b794c42019-02-23 08:42:37 +00001861
 1862		/* If the type of records being processed is not known yet,
 1863		 * set it to the record type just dequeued. If it is already known,
 1864		 * but does not match the record type just dequeued, go to end.
 1865		 * We always have the record type here: for TLS 1.2 it is known as
 1866		 * soon as the record is dequeued from the stream parser, and for
 1867		 * TLS 1.3 async is disabled, so decryption has already run.
1868 */
1869
1870 if (!control)
1871 control = tlm->control;
1872 else if (control != tlm->control)
1873 goto recv_end;
Dave Watsonfedf2012019-01-30 21:58:24 +00001874
Dave Watsonc46234e2018-03-22 10:10:35 -07001875 if (!cmsg) {
1876 int cerr;
1877
1878 cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
Vakul Garg2b794c42019-02-23 08:42:37 +00001879 sizeof(control), &control);
Dave Watsonc46234e2018-03-22 10:10:35 -07001880 cmsg = true;
Vakul Garg2b794c42019-02-23 08:42:37 +00001881 if (control != TLS_RECORD_TYPE_DATA) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001882 if (cerr || msg->msg_flags & MSG_CTRUNC) {
1883 err = -EIO;
1884 goto recv_end;
1885 }
1886 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001887 }
1888
Vakul Gargc0ab4732019-02-11 11:31:05 +00001889 if (async)
1890 goto pick_next_record;
1891
Dave Watsonfedf2012019-01-30 21:58:24 +00001892 if (!zc) {
John Fastabende91de6a2020-05-29 16:06:59 -07001893 if (bpf_strp_enabled) {
1894 err = sk_psock_tls_strp_read(psock, skb);
1895 if (err != __SK_PASS) {
1896 rxm->offset = rxm->offset + rxm->full_len;
1897 rxm->full_len = 0;
1898 if (err == __SK_DROP)
1899 consume_skb(skb);
1900 ctx->recv_pkt = NULL;
1901 __strp_unpause(&ctx->strp);
1902 continue;
1903 }
1904 }
1905
Dave Watsonfedf2012019-01-30 21:58:24 +00001906 if (rxm->full_len > len) {
1907 retain_skb = true;
1908 chunk = len;
1909 } else {
1910 chunk = rxm->full_len;
1911 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001912
Dave Watsonfedf2012019-01-30 21:58:24 +00001913 err = skb_copy_datagram_msg(skb, rxm->offset,
1914 msg, chunk);
1915 if (err < 0)
1916 goto recv_end;
Dave Watsonc46234e2018-03-22 10:10:35 -07001917
Dave Watsonfedf2012019-01-30 21:58:24 +00001918 if (!is_peek) {
1919 rxm->offset = rxm->offset + chunk;
1920 rxm->full_len = rxm->full_len - chunk;
Vakul Garg692d7b52019-01-16 10:40:16 +00001921 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001922 }
1923
Vakul Garg94524d82018-08-29 15:26:55 +05301924pick_next_record:
Vakul Garg692d7b52019-01-16 10:40:16 +00001925 if (chunk > len)
1926 chunk = len;
1927
1928 decrypted += chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001929 len -= chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001930
Vakul Garg692d7b52019-01-16 10:40:16 +00001931 /* For async or peek case, queue the current skb */
1932 if (async || is_peek || retain_skb) {
1933 skb_queue_tail(&ctx->rx_list, skb);
1934 skb = NULL;
1935 }
Vakul Garg94524d82018-08-29 15:26:55 +05301936
Vakul Garg692d7b52019-01-16 10:40:16 +00001937 if (tls_sw_advance_skb(sk, skb, chunk)) {
1938 /* Return full control message to
1939 * userspace before trying to parse
1940 * another message type
Daniel Borkmann50c6b582018-09-14 23:00:55 +02001941 */
Vakul Garg692d7b52019-01-16 10:40:16 +00001942 msg->msg_flags |= MSG_EOR;
Vadim Fedorenko3fe16ed2020-11-15 07:16:00 +03001943 if (control != TLS_RECORD_TYPE_DATA)
Vakul Garg692d7b52019-01-16 10:40:16 +00001944 goto recv_end;
1945 } else {
Daniel Borkmann50c6b582018-09-14 23:00:55 +02001946 break;
Dave Watsonc46234e2018-03-22 10:10:35 -07001947 }
Jakub Kicinski04b25a52019-05-24 10:34:32 -07001948 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001949
1950recv_end:
Vakul Garg94524d82018-08-29 15:26:55 +05301951 if (num_async) {
1952 /* Wait for all previously submitted records to be decrypted */
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05301953 spin_lock_bh(&ctx->decrypt_compl_lock);
1954 ctx->async_notify = true;
1955 pending = atomic_read(&ctx->decrypt_pending);
1956 spin_unlock_bh(&ctx->decrypt_compl_lock);
1957 if (pending) {
Vakul Garg94524d82018-08-29 15:26:55 +05301958 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1959 if (err) {
1960 /* one of async decrypt failed */
1961 tls_err_abort(sk, err);
1962 copied = 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001963 decrypted = 0;
1964 goto end;
Vakul Garg94524d82018-08-29 15:26:55 +05301965 }
1966 } else {
1967 reinit_completion(&ctx->async_wait.completion);
1968 }
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05301969
1970 /* There can be no concurrent accesses, since we have no
1971 * pending decrypt operations
1972 */
Vakul Garg94524d82018-08-29 15:26:55 +05301973 WRITE_ONCE(ctx->async_notify, false);
Vakul Garg692d7b52019-01-16 10:40:16 +00001974
1975 /* Drain records from the rx_list & copy if required */
1976 if (is_peek || is_kvec)
Vakul Garg2b794c42019-02-23 08:42:37 +00001977 err = process_rx_list(ctx, msg, &control, &cmsg, copied,
Vakul Garg692d7b52019-01-16 10:40:16 +00001978 decrypted, false, is_peek);
1979 else
Vakul Garg2b794c42019-02-23 08:42:37 +00001980 err = process_rx_list(ctx, msg, &control, &cmsg, 0,
Vakul Garg692d7b52019-01-16 10:40:16 +00001981 decrypted, true, is_peek);
1982 if (err < 0) {
1983 tls_err_abort(sk, err);
1984 copied = 0;
1985 goto end;
1986 }
Vakul Garg94524d82018-08-29 15:26:55 +05301987 }
1988
Vakul Garg692d7b52019-01-16 10:40:16 +00001989 copied += decrypted;
1990
1991end:
Dave Watsonc46234e2018-03-22 10:10:35 -07001992 release_sock(sk);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001993 if (psock)
1994 sk_psock_put(sk, psock);
Dave Watsonc46234e2018-03-22 10:10:35 -07001995 return copied ? : err;
1996}
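/* Illustrative userspace sketch (not part of this file): the control message
 * queued by put_cmsg() above is how non-data records reach the application.
 * The data buffer and variable names are assumptions; the constants are the
 * uapi ones from <linux/tls.h>.
 *
 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	unsigned char record_type;
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	recvmsg(sock, &msg, 0);
 *
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	if (cmsg && cmsg->cmsg_level == SOL_TLS &&
 *	    cmsg->cmsg_type == TLS_GET_RECORD_TYPE)
 *		record_type = *(unsigned char *)CMSG_DATA(cmsg);
 */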
1997
1998ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
1999 struct pipe_inode_info *pipe,
2000 size_t len, unsigned int flags)
2001{
2002 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002003 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07002004 struct strp_msg *rxm = NULL;
2005 struct sock *sk = sock->sk;
2006 struct sk_buff *skb;
2007 ssize_t copied = 0;
2008 int err = 0;
2009 long timeo;
2010 int chunk;
Vakul Garg0b243d02018-08-10 20:46:41 +05302011 bool zc = false;
Dave Watsonc46234e2018-03-22 10:10:35 -07002012
2013 lock_sock(sk);
2014
Jim Ma974271e2021-05-14 11:11:02 +08002015 timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
Dave Watsonc46234e2018-03-22 10:10:35 -07002016
Jim Ma974271e2021-05-14 11:11:02 +08002017 skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
Dave Watsonc46234e2018-03-22 10:10:35 -07002018 if (!skb)
2019 goto splice_read_end;
2020
Dave Watsonc46234e2018-03-22 10:10:35 -07002021 if (!ctx->decrypted) {
Vakul Garg692d7b52019-01-16 10:40:16 +00002022 err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
Dave Watsonc46234e2018-03-22 10:10:35 -07002023
Dave Watsonfedf2012019-01-30 21:58:24 +00002024 /* splice does not support reading control messages */
2025 if (ctx->control != TLS_RECORD_TYPE_DATA) {
Valentin Vidic4a5cdc62019-12-05 07:41:18 +01002026 err = -EINVAL;
Dave Watsonfedf2012019-01-30 21:58:24 +00002027 goto splice_read_end;
2028 }
2029
Dave Watsonc46234e2018-03-22 10:10:35 -07002030 if (err < 0) {
Daniel Jordanda353fac2021-10-27 17:59:20 -04002031 tls_err_abort(sk, -EBADMSG);
Dave Watsonc46234e2018-03-22 10:10:35 -07002032 goto splice_read_end;
2033 }
Jakub Kicinskibc76e5b2019-10-06 21:09:32 -07002034 ctx->decrypted = 1;
Dave Watsonc46234e2018-03-22 10:10:35 -07002035 }
2036 rxm = strp_msg(skb);
2037
2038 chunk = min_t(unsigned int, rxm->full_len, len);
2039 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2040 if (copied < 0)
2041 goto splice_read_end;
2042
Jim Mad8654f42021-05-12 17:00:11 +08002043 tls_sw_advance_skb(sk, skb, copied);
Dave Watsonc46234e2018-03-22 10:10:35 -07002044
2045splice_read_end:
2046 release_sock(sk);
2047 return copied ? : err;
2048}
2049
Cong Wang7b50ecf2021-10-08 13:33:03 -07002050bool tls_sw_sock_is_readable(struct sock *sk)
Dave Watsonc46234e2018-03-22 10:10:35 -07002051{
Dave Watsonc46234e2018-03-22 10:10:35 -07002052 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002053 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002054 bool ingress_empty = true;
2055 struct sk_psock *psock;
Dave Watsonc46234e2018-03-22 10:10:35 -07002056
John Fastabendd3b18ad32018-10-13 02:46:01 +02002057 rcu_read_lock();
2058 psock = sk_psock(sk);
2059 if (psock)
2060 ingress_empty = list_empty(&psock->ingress_msg);
2061 rcu_read_unlock();
Dave Watsonc46234e2018-03-22 10:10:35 -07002062
Jakub Kicinski13aecb12019-07-04 14:50:36 -07002063 return !ingress_empty || ctx->recv_pkt ||
2064 !skb_queue_empty(&ctx->rx_list);
Dave Watsonc46234e2018-03-22 10:10:35 -07002065}
2066
2067static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
2068{
2069 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002070 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00002071 struct tls_prot_info *prot = &tls_ctx->prot_info;
Kees Cook3463e512018-06-25 16:55:05 -07002072 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
Dave Watsonc46234e2018-03-22 10:10:35 -07002073 struct strp_msg *rxm = strp_msg(skb);
2074 size_t cipher_overhead;
2075 size_t data_len = 0;
2076 int ret;
2077
2078 /* Verify that we have a full TLS header, or wait for more data */
Vakul Garg4509de12019-02-14 07:11:35 +00002079 if (rxm->offset + prot->prepend_size > skb->len)
Dave Watsonc46234e2018-03-22 10:10:35 -07002080 return 0;
2081
Kees Cook3463e512018-06-25 16:55:05 -07002082 /* Sanity-check size of on-stack buffer. */
Vakul Garg4509de12019-02-14 07:11:35 +00002083 if (WARN_ON(prot->prepend_size > sizeof(header))) {
Kees Cook3463e512018-06-25 16:55:05 -07002084 ret = -EINVAL;
2085 goto read_failure;
2086 }
2087
Dave Watsonc46234e2018-03-22 10:10:35 -07002088 /* Linearize header to local buffer */
Vakul Garg4509de12019-02-14 07:11:35 +00002089 ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
Dave Watsonc46234e2018-03-22 10:10:35 -07002090
2091 if (ret < 0)
2092 goto read_failure;
2093
2094 ctx->control = header[0];
2095
2096 data_len = ((header[4] & 0xFF) | (header[3] << 8));
2097
Vakul Garg4509de12019-02-14 07:11:35 +00002098 cipher_overhead = prot->tag_size;
Vadim Fedorenkoa6acbe62020-11-24 18:24:48 +03002099 if (prot->version != TLS_1_3_VERSION &&
2100 prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
Vakul Garg4509de12019-02-14 07:11:35 +00002101 cipher_overhead += prot->iv_size;
Dave Watsonc46234e2018-03-22 10:10:35 -07002102
Dave Watson130b3922019-01-30 21:58:31 +00002103 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
Vakul Garg4509de12019-02-14 07:11:35 +00002104 prot->tail_size) {
Dave Watsonc46234e2018-03-22 10:10:35 -07002105 ret = -EMSGSIZE;
2106 goto read_failure;
2107 }
2108 if (data_len < cipher_overhead) {
2109 ret = -EBADMSG;
2110 goto read_failure;
2111 }
2112
Dave Watson130b3922019-01-30 21:58:31 +00002113 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2114 if (header[1] != TLS_1_2_VERSION_MINOR ||
2115 header[2] != TLS_1_2_VERSION_MAJOR) {
Dave Watsonc46234e2018-03-22 10:10:35 -07002116 ret = -EINVAL;
2117 goto read_failure;
2118 }
Jakub Kicinskibe2fbc12019-09-02 21:31:05 -07002119
Jakub Kicinskif953d33b2019-06-10 21:40:02 -07002120 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
Jakub Kicinskife58a5a2019-06-10 21:40:01 -07002121 TCP_SKB_CB(skb)->seq + rxm->offset);
Dave Watsonc46234e2018-03-22 10:10:35 -07002122 return data_len + TLS_HEADER_SIZE;
2123
2124read_failure:
2125 tls_err_abort(strp->sk, ret);
2126
2127 return ret;
2128}
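/* Worked example: for the record header 17 03 03 00 45 (application data,
 * TLS 1.2 on the wire, length 0x0045), data_len is 69; with AES-GCM-128 on
 * TLS 1.2 the cipher overhead is 16 + 8 = 24 bytes, the sanity checks pass,
 * and the strparser is told the full message spans 69 + TLS_HEADER_SIZE = 74
 * bytes.
 */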
2129
2130static void tls_queue(struct strparser *strp, struct sk_buff *skb)
2131{
2132 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002133 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07002134
Jakub Kicinskibc76e5b2019-10-06 21:09:32 -07002135 ctx->decrypted = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07002136
2137 ctx->recv_pkt = skb;
2138 strp_pause(strp);
2139
Vakul Gargad13acc2018-07-30 16:08:33 +05302140 ctx->saved_data_ready(strp->sk);
Dave Watsonc46234e2018-03-22 10:10:35 -07002141}
2142
2143static void tls_data_ready(struct sock *sk)
2144{
2145 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002146 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002147 struct sk_psock *psock;
Dave Watsonc46234e2018-03-22 10:10:35 -07002148
2149 strp_data_ready(&ctx->strp);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002150
2151 psock = sk_psock_get(sk);
Xiyu Yang62b40112020-04-25 21:10:23 +08002152 if (psock) {
2153 if (!list_empty(&psock->ingress_msg))
2154 ctx->saved_data_ready(sk);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002155 sk_psock_put(sk, psock);
2156 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002157}
2158
John Fastabendf87e62d2019-07-19 10:29:16 -07002159void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2160{
2161 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2162
2163 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2164 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2165 cancel_delayed_work_sync(&ctx->tx_work.work);
2166}
2167
John Fastabend313ab002019-07-19 10:29:17 -07002168void tls_sw_release_resources_tx(struct sock *sk)
Dave Watson3c4d7552017-06-14 11:37:39 -07002169{
2170 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002171 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +05302172 struct tls_rec *rec, *tmp;
Rohit Maheshwari38f7e1c2020-09-24 12:28:45 +05302173 int pending;
Vakul Garga42055e2018-09-21 09:46:13 +05302174
2175 /* Wait for any pending async encryptions to complete */
Rohit Maheshwari38f7e1c2020-09-24 12:28:45 +05302176 spin_lock_bh(&ctx->encrypt_compl_lock);
2177 ctx->async_notify = true;
2178 pending = atomic_read(&ctx->encrypt_pending);
2179 spin_unlock_bh(&ctx->encrypt_compl_lock);
2180
2181 if (pending)
Vakul Garga42055e2018-09-21 09:46:13 +05302182 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2183
Vakul Garga42055e2018-09-21 09:46:13 +05302184 tls_tx_records(sk, -1);
2185
Vakul Garg9932a292018-09-24 15:35:56 +05302186	/* Free up unsent records in tx_list. First, free
Vakul Garga42055e2018-09-21 09:46:13 +05302187	 * the partially sent record, if any, at the head of tx_list.
2188 */
Jakub Kicinskic5daa6c2019-11-27 12:16:44 -08002189 if (tls_ctx->partially_sent_record) {
2190 tls_free_partial_record(sk, tls_ctx);
Vakul Garg9932a292018-09-24 15:35:56 +05302191 rec = list_first_entry(&ctx->tx_list,
Vakul Garga42055e2018-09-21 09:46:13 +05302192 struct tls_rec, list);
2193 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02002194 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +05302195 kfree(rec);
2196 }
2197
Vakul Garg9932a292018-09-24 15:35:56 +05302198 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
Vakul Garga42055e2018-09-21 09:46:13 +05302199 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02002200 sk_msg_free(sk, &rec->msg_encrypted);
2201 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +05302202 kfree(rec);
2203 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002204
Vakul Garg201876b2018-07-24 16:54:27 +05302205 crypto_free_aead(ctx->aead_send);
Vakul Gargc7749732018-09-25 20:21:51 +05302206 tls_free_open_rec(sk);
John Fastabend313ab002019-07-19 10:29:17 -07002207}
2208
2209void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2210{
2211 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002212
2213 kfree(ctx);
2214}
2215
Boris Pismenny39f56e12018-07-13 14:33:41 +03002216void tls_sw_release_resources_rx(struct sock *sk)
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002217{
2218 struct tls_context *tls_ctx = tls_get_ctx(sk);
2219 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2220
Jakub Kicinski12c76862019-04-19 16:52:19 -07002221 kfree(tls_ctx->rx.rec_seq);
2222 kfree(tls_ctx->rx.iv);
2223
Dave Watsonc46234e2018-03-22 10:10:35 -07002224 if (ctx->aead_recv) {
Vakul Garg201876b2018-07-24 16:54:27 +05302225 kfree_skb(ctx->recv_pkt);
2226 ctx->recv_pkt = NULL;
Vakul Garg692d7b52019-01-16 10:40:16 +00002227 skb_queue_purge(&ctx->rx_list);
Dave Watsonc46234e2018-03-22 10:10:35 -07002228 crypto_free_aead(ctx->aead_recv);
2229 strp_stop(&ctx->strp);
John Fastabend313ab002019-07-19 10:29:17 -07002230 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2231 * we still want to strp_stop(), but sk->sk_data_ready was
2232 * never swapped.
2233 */
2234 if (ctx->saved_data_ready) {
2235 write_lock_bh(&sk->sk_callback_lock);
2236 sk->sk_data_ready = ctx->saved_data_ready;
2237 write_unlock_bh(&sk->sk_callback_lock);
2238 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002239 }
Boris Pismenny39f56e12018-07-13 14:33:41 +03002240}
2241
John Fastabend313ab002019-07-19 10:29:17 -07002242void tls_sw_strparser_done(struct tls_context *tls_ctx)
2243{
2244 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2245
2246 strp_done(&ctx->strp);
2247}
2248
2249void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2250{
2251 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2252
2253 kfree(ctx);
2254}
2255
Boris Pismenny39f56e12018-07-13 14:33:41 +03002256void tls_sw_free_resources_rx(struct sock *sk)
2257{
2258 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismenny39f56e12018-07-13 14:33:41 +03002259
2260 tls_sw_release_resources_rx(sk);
John Fastabend313ab002019-07-19 10:29:17 -07002261 tls_sw_free_ctx_rx(tls_ctx);
Dave Watson3c4d7552017-06-14 11:37:39 -07002262}
2263
Vakul Garg9932a292018-09-24 15:35:56 +05302264/* The work handler to transmit the encrypted records in tx_list */
Vakul Garga42055e2018-09-21 09:46:13 +05302265static void tx_work_handler(struct work_struct *work)
2266{
2267 struct delayed_work *delayed_work = to_delayed_work(work);
2268 struct tx_work *tx_work = container_of(delayed_work,
2269 struct tx_work, work);
2270 struct sock *sk = tx_work->sk;
2271 struct tls_context *tls_ctx = tls_get_ctx(sk);
John Fastabendf87e62d2019-07-19 10:29:16 -07002272 struct tls_sw_context_tx *ctx;
2273
2274 if (unlikely(!tls_ctx))
2275 return;
2276
2277 ctx = tls_sw_ctx_tx(tls_ctx);
2278 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2279 return;
Vakul Garga42055e2018-09-21 09:46:13 +05302280
2281 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2282 return;
Jakub Kicinski79ffe602019-11-05 14:24:35 -08002283 mutex_lock(&tls_ctx->tx_lock);
Vakul Garga42055e2018-09-21 09:46:13 +05302284 lock_sock(sk);
2285 tls_tx_records(sk, -1);
2286 release_sock(sk);
Jakub Kicinski79ffe602019-11-05 14:24:35 -08002287 mutex_unlock(&tls_ctx->tx_lock);
Vakul Garga42055e2018-09-21 09:46:13 +05302288}
2289
Boris Pismenny7463d3a2019-02-27 17:38:04 +02002290void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2291{
2292 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2293
2294 /* Schedule the transmission if tx list is ready */
Jakub Kicinski02b1fa02019-11-05 14:24:34 -08002295 if (is_tx_ready(tx_ctx) &&
2296 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2297 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
Boris Pismenny7463d3a2019-02-27 17:38:04 +02002298}
2299
Jakub Kicinski318892a2019-07-19 10:29:14 -07002300void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2301{
2302 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2303
2304 write_lock_bh(&sk->sk_callback_lock);
2305 rx_ctx->saved_data_ready = sk->sk_data_ready;
2306 sk->sk_data_ready = tls_data_ready;
2307 write_unlock_bh(&sk->sk_callback_lock);
2308
2309 strp_check_rcv(&rx_ctx->strp);
2310}
2311
Dave Watsonc46234e2018-03-22 10:10:35 -07002312int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
Dave Watson3c4d7552017-06-14 11:37:39 -07002313{
Vakul Garg4509de12019-02-14 07:11:35 +00002314 struct tls_context *tls_ctx = tls_get_ctx(sk);
2315 struct tls_prot_info *prot = &tls_ctx->prot_info;
Dave Watson3c4d7552017-06-14 11:37:39 -07002316 struct tls_crypto_info *crypto_info;
2317 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002318 struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002319 struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
Vadim Fedorenko74ea6102020-11-24 18:24:49 +03002320 struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002321 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2322 struct tls_sw_context_rx *sw_ctx_rx = NULL;
Dave Watsonc46234e2018-03-22 10:10:35 -07002323 struct cipher_context *cctx;
2324 struct crypto_aead **aead;
2325 struct strp_callbacks cb;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002326 u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
Vakul Garg692d7b52019-01-16 10:40:16 +00002327 struct crypto_tfm *tfm;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002328 char *iv, *rec_seq, *key, *salt, *cipher_name;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002329 size_t keysize;
Dave Watson3c4d7552017-06-14 11:37:39 -07002330 int rc = 0;
2331
2332 if (!ctx) {
2333 rc = -EINVAL;
2334 goto out;
2335 }
2336
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002337 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03002338 if (!ctx->priv_ctx_tx) {
2339 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2340 if (!sw_ctx_tx) {
2341 rc = -ENOMEM;
2342 goto out;
2343 }
2344 ctx->priv_ctx_tx = sw_ctx_tx;
2345 } else {
2346 sw_ctx_tx =
2347 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
Dave Watsonc46234e2018-03-22 10:10:35 -07002348 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002349 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03002350 if (!ctx->priv_ctx_rx) {
2351 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2352 if (!sw_ctx_rx) {
2353 rc = -ENOMEM;
2354 goto out;
2355 }
2356 ctx->priv_ctx_rx = sw_ctx_rx;
2357 } else {
2358 sw_ctx_rx =
2359 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002360 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002361 }
2362
Dave Watsonc46234e2018-03-22 10:10:35 -07002363 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03002364 crypto_init_wait(&sw_ctx_tx->async_wait);
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05302365 spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02002366 crypto_info = &ctx->crypto_send.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07002367 cctx = &ctx->tx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002368 aead = &sw_ctx_tx->aead_send;
Vakul Garg9932a292018-09-24 15:35:56 +05302369 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
Vakul Garga42055e2018-09-21 09:46:13 +05302370 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2371 sw_ctx_tx->tx_work.sk = sk;
Dave Watsonc46234e2018-03-22 10:10:35 -07002372 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03002373 crypto_init_wait(&sw_ctx_rx->async_wait);
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05302374 spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02002375 crypto_info = &ctx->crypto_recv.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07002376 cctx = &ctx->rx;
Vakul Garg692d7b52019-01-16 10:40:16 +00002377 skb_queue_head_init(&sw_ctx_rx->rx_list);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002378 aead = &sw_ctx_rx->aead_recv;
Dave Watsonc46234e2018-03-22 10:10:35 -07002379 }
2380
Dave Watson3c4d7552017-06-14 11:37:39 -07002381 switch (crypto_info->cipher_type) {
2382 case TLS_CIPHER_AES_GCM_128: {
2383 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2384 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2385 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2386 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
2387 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2388 rec_seq =
2389 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
2390 gcm_128_info =
2391 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002392 keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2393 key = gcm_128_info->key;
2394 salt = gcm_128_info->salt;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002395 salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2396 cipher_name = "gcm(aes)";
Dave Watsonfb99bce2019-01-30 21:58:05 +00002397 break;
2398 }
2399 case TLS_CIPHER_AES_GCM_256: {
2400 nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2401 tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2402 iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2403 iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
2404 rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2405 rec_seq =
2406 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
2407 gcm_256_info =
2408 (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
2409 keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2410 key = gcm_256_info->key;
2411 salt = gcm_256_info->salt;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002412 salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2413 cipher_name = "gcm(aes)";
2414 break;
2415 }
2416 case TLS_CIPHER_AES_CCM_128: {
2417 nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2418 tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2419 iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2420 iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
2421 rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2422 rec_seq =
2423 ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
2424 ccm_128_info =
2425 (struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
2426 keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2427 key = ccm_128_info->key;
2428 salt = ccm_128_info->salt;
2429 salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2430 cipher_name = "ccm(aes)";
Dave Watson3c4d7552017-06-14 11:37:39 -07002431 break;
2432 }
Vadim Fedorenko74ea6102020-11-24 18:24:49 +03002433 case TLS_CIPHER_CHACHA20_POLY1305: {
2434 chacha20_poly1305_info = (void *)crypto_info;
2435 nonce_size = 0;
2436 tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
2437 iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
2438 iv = chacha20_poly1305_info->iv;
2439 rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
2440 rec_seq = chacha20_poly1305_info->rec_seq;
2441 keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
2442 key = chacha20_poly1305_info->key;
2443 salt = chacha20_poly1305_info->salt;
2444 salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
2445 cipher_name = "rfc7539(chacha20,poly1305)";
2446 break;
2447 }
Tianjia Zhang227b9642021-09-16 11:37:38 +08002448 case TLS_CIPHER_SM4_GCM: {
2449 struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
2450
2451 sm4_gcm_info = (void *)crypto_info;
2452 nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2453 tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
2454 iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2455 iv = sm4_gcm_info->iv;
2456 rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
2457 rec_seq = sm4_gcm_info->rec_seq;
2458 keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
2459 key = sm4_gcm_info->key;
2460 salt = sm4_gcm_info->salt;
2461 salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
2462 cipher_name = "gcm(sm4)";
2463 break;
2464 }
2465 case TLS_CIPHER_SM4_CCM: {
2466 struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
2467
2468 sm4_ccm_info = (void *)crypto_info;
2469 nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2470 tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
2471 iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2472 iv = sm4_ccm_info->iv;
2473 rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
2474 rec_seq = sm4_ccm_info->rec_seq;
2475 keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
2476 key = sm4_ccm_info->key;
2477 salt = sm4_ccm_info->salt;
2478 salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
2479 cipher_name = "ccm(sm4)";
2480 break;
2481 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002482 default:
2483 rc = -EINVAL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002484 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07002485 }
2486
Jakub Kicinski89fec472019-06-10 21:40:00 -07002487 /* Sanity-check the sizes for stack allocations. */
2488 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2489 rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
Kees Cookb16520f2018-04-10 17:52:34 -07002490 rc = -EINVAL;
2491 goto free_priv;
2492 }
2493
Dave Watson130b3922019-01-30 21:58:31 +00002494 if (crypto_info->version == TLS_1_3_VERSION) {
2495 nonce_size = 0;
Vakul Garg4509de12019-02-14 07:11:35 +00002496 prot->aad_size = TLS_HEADER_SIZE;
2497 prot->tail_size = 1;
Dave Watson130b3922019-01-30 21:58:31 +00002498 } else {
Vakul Garg4509de12019-02-14 07:11:35 +00002499 prot->aad_size = TLS_AAD_SPACE_SIZE;
2500 prot->tail_size = 0;
Dave Watson130b3922019-01-30 21:58:31 +00002501 }
2502
Vakul Garg4509de12019-02-14 07:11:35 +00002503 prot->version = crypto_info->version;
2504 prot->cipher_type = crypto_info->cipher_type;
2505 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2506 prot->tag_size = tag_size;
2507 prot->overhead_size = prot->prepend_size +
2508 prot->tag_size + prot->tail_size;
2509 prot->iv_size = iv_size;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002510 prot->salt_size = salt_size;
2511 cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
Dave Watsonc46234e2018-03-22 10:10:35 -07002512 if (!cctx->iv) {
Dave Watson3c4d7552017-06-14 11:37:39 -07002513 rc = -ENOMEM;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002514 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07002515 }
Dave Watsonfb99bce2019-01-30 21:58:05 +00002516 /* Note: 128 & 256 bit salt are the same size */
Vakul Garg4509de12019-02-14 07:11:35 +00002517 prot->rec_seq_size = rec_seq_size;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002518 memcpy(cctx->iv, salt, salt_size);
2519 memcpy(cctx->iv + salt_size, iv, iv_size);
zhong jiang969d5092018-08-01 00:50:24 +08002520 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
Dave Watsonc46234e2018-03-22 10:10:35 -07002521 if (!cctx->rec_seq) {
Dave Watson3c4d7552017-06-14 11:37:39 -07002522 rc = -ENOMEM;
2523 goto free_iv;
2524 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002525
Dave Watsonc46234e2018-03-22 10:10:35 -07002526 if (!*aead) {
Vakul Gargf295b3a2019-03-20 02:03:36 +00002527 *aead = crypto_alloc_aead(cipher_name, 0, 0);
Dave Watsonc46234e2018-03-22 10:10:35 -07002528 if (IS_ERR(*aead)) {
2529 rc = PTR_ERR(*aead);
2530 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002531 goto free_rec_seq;
2532 }
2533 }
2534
2535 ctx->push_pending_record = tls_sw_push_pending_record;
2536
Dave Watsonfb99bce2019-01-30 21:58:05 +00002537 rc = crypto_aead_setkey(*aead, key, keysize);
2538
Dave Watson3c4d7552017-06-14 11:37:39 -07002539 if (rc)
2540 goto free_aead;
2541
Vakul Garg4509de12019-02-14 07:11:35 +00002542 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
Dave Watsonc46234e2018-03-22 10:10:35 -07002543 if (rc)
2544 goto free_aead;
2545
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002546 if (sw_ctx_rx) {
Vakul Garg692d7b52019-01-16 10:40:16 +00002547 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
Vakul Garg8497ded2019-02-09 07:53:28 +00002548
2549 if (crypto_info->version == TLS_1_3_VERSION)
Jakub Kicinski5c5458e2019-10-06 21:09:31 -07002550 sw_ctx_rx->async_capable = 0;
Vakul Garg8497ded2019-02-09 07:53:28 +00002551 else
2552 sw_ctx_rx->async_capable =
Jakub Kicinski5c5458e2019-10-06 21:09:31 -07002553 !!(tfm->__crt_alg->cra_flags &
2554 CRYPTO_ALG_ASYNC);
Vakul Garg692d7b52019-01-16 10:40:16 +00002555
Dave Watsonc46234e2018-03-22 10:10:35 -07002556 /* Set up strparser */
2557 memset(&cb, 0, sizeof(cb));
2558 cb.rcv_msg = tls_queue;
2559 cb.parse_msg = tls_read_size;
2560
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002561 strp_init(&sw_ctx_rx->strp, sk, &cb);
Dave Watsonc46234e2018-03-22 10:10:35 -07002562 }
2563
2564 goto out;
Dave Watson3c4d7552017-06-14 11:37:39 -07002565
2566free_aead:
Dave Watsonc46234e2018-03-22 10:10:35 -07002567 crypto_free_aead(*aead);
2568 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002569free_rec_seq:
Dave Watsonc46234e2018-03-22 10:10:35 -07002570 kfree(cctx->rec_seq);
2571 cctx->rec_seq = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002572free_iv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002573 kfree(cctx->iv);
2574 cctx->iv = NULL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002575free_priv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002576 if (tx) {
2577 kfree(ctx->priv_ctx_tx);
2578 ctx->priv_ctx_tx = NULL;
2579 } else {
2580 kfree(ctx->priv_ctx_rx);
2581 ctx->priv_ctx_rx = NULL;
2582 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002583out:
2584 return rc;
2585}
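/* Illustrative userspace sketch (not part of this file): tls_set_sw_offload()
 * runs when the application installs key material with setsockopt(). The
 * key/salt/iv/rec_seq source buffers below are assumptions; the struct and
 * constants are the uapi ones from <linux/tls.h>.
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *
 *	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *
 *	setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *
 * TLS_RX is configured the same way (with the receive-direction keys) and
 * reaches the tx == 0 branch above.
 */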