/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/splice.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

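/* Count the scatterlist entries needed to map @len bytes of @skb starting at
 * @offset, walking the linear area, the page frags and any frag list. The
 * recursion into frag lists is depth-limited to avoid stack exhaustion.
 */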
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}

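/* For TLS 1.3, scan backwards from the end of the record payload (skipping
 * the auth tag) until the nonzero content-type byte is found, and return the
 * number of zero-padding bytes to trim (or a negative error).
 */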
static int padding_length(struct tls_sw_context_rx *ctx,
			  struct tls_prot_info *prot, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		char content_type = 0;
		int err;
		int back = 17;

		while (content_type == 0) {
			if (back > rxm->full_len - prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb,
					    rxm->offset + rxm->full_len - back,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			back++;
		}
		ctx->control = content_type;
	}
	return sub;
}

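/* Completion callback for asynchronous AEAD decryption. Records the result,
 * strips the record header and padding on success, releases any out-of-place
 * destination pages, and wakes the waiter once the last pending request has
 * finished.
 */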
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct scatterlist *sgout = aead_req->dst;
	struct scatterlist *sgin = aead_req->src;
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	prot = &tls_ctx->prot_info;

	/* Propagate the error if there was one */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(skb->sk),
				      LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(skb->sk, err);
	} else {
		struct strp_msg *rxm = strp_msg(skb);
		int pad;

		pad = padding_length(ctx, prot, skb);
		if (pad < 0) {
			ctx->async_wait.err = pad;
			tls_err_abort(skb->sk, pad);
		} else {
			rxm->full_len -= pad;
			rxm->offset += prot->prepend_size;
			rxm->full_len -= prot->overhead_size;
		}
	}

	/* After using skb->sk to propagate sk through the crypto async
	 * callback we need to NULL it again.
	 */
	skb->sk = NULL;

	/* Free the destination pages if the skb was not decrypted in place */
	if (sgout != sgin) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	spin_lock_bh(&ctx->decrypt_compl_lock);
	pending = atomic_dec_return(&ctx->decrypt_pending);

	if (!pending && ctx->async_notify)
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->decrypt_compl_lock);
}

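/* Submit one AEAD decryption request. In async mode the request may return
 * -EINPROGRESS and complete later in tls_decrypt_done(); in sync mode we
 * wait for the result here.
 */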
static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (async) {
		/* Using skb->sk to push sk through to the crypto async
		 * callback handler. This allows propagating errors up to
		 * the socket if needed. It _must_ be cleared in the async
		 * handler before consume_skb is called. We _know_ skb->sk
		 * is NULL because it is a clone from strparser.
		 */
		skb->sk = sk;
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, skb);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (async)
			return ret;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}

	if (async)
		atomic_dec(&ctx->decrypt_pending);

	return ret;
}

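/* Trim both messages of the open record: the plaintext down to target_size
 * bytes of payload, and the encrypted side to the same plus record overhead.
 */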
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from the encrypted sg
	 * at the end of the plaintext sg. It is guaranteed that msg_en
	 * has the required room (ensured by the caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * the same offset in both the plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

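/* Allocate a new TLS record together with its AEAD request, and point the
 * first entry of each AEAD scatterlist at the record's AAD space.
 */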
static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}

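/* Transmit any partially sent record first, then walk tx_list and push every
 * record whose encryption has completed (tx_ready). Called from the transmit
 * paths and from the deferred tx worker.
 */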
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	return rc;
}

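/* Completion callback for asynchronous AEAD encryption. Restores the record
 * header bytes hidden from the cipher, marks the record ready for
 * transmission and, if it sits at the head of tx_list, schedules the tx
 * worker.
 */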
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct sock *sk = req->data;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);
	msg_en = &rec->msg_encrypted;

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if an error was previously set on the socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on the socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If the record is at the head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	spin_lock_bh(&ctx->encrypt_compl_lock);
	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && ctx->async_notify)
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->encrypt_compl_lock);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}

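/* Build the per-record IV, hide the TLS header from the cipher by shifting
 * the first output sg entry past it, queue the record on tx_list and submit
 * the AEAD encryption request (which may complete asynchronously).
 */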
static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM-based ciphers, the first byte of the IV is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, sk);

	/* Add the record to tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from the context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}

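/* Split the open record at split_point so that the first apply_bytes worth
 * of data stays in the original record and the remainder moves to a freshly
 * allocated one. Used when a BPF verdict applies to only part of a record.
 */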
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}

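/* Undo tls_split_open_record(): splice the plaintext of @from back onto @to,
 * coalescing adjacent entries on the same page, and free the split record.
 */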
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}

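/* Close the open record: split it if a BPF verdict applies to only part of
 * it, chain the AAD, payload and (for TLS 1.3) content-type scatterlists,
 * write the record header, and hand the record to tls_do_encryption().
 */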
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, orig_end;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if tls_split_open_record() above allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust the pointers and continue without
		 * the split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add the content type to the end of the message.
		 * No padding is added.
		 */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, record_type, prot);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}

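/* Run the socket's BPF msg verdict program (if any) on the open record and
 * act on the result: push the record out (PASS), redirect its payload to
 * another socket (REDIRECT), or drop it (DROP).
 */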
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
 out_err:
	sk_psock_put(sk, psock);
	return err;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}

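/* Copy user data into open records, encrypting and transmitting a record
 * whenever it fills up (or on EOR), with an optional zerocopy path that maps
 * user pages directly into the plaintext scatterlist.
 */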
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;
	int pending;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due to the
			 * max sg elements limit.
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due to the
			 * max sg elements limit.
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* The open record is left in place only if the data was
		 * successfully copied, otherwise we would trim the sg but not
		 * reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to complete */
		spin_lock_bh(&ctx->encrypt_compl_lock);
		ctx->async_notify = true;

		pending = atomic_read(&ctx->encrypt_pending);
		spin_unlock_bh(&ctx->encrypt_compl_lock);
		if (pending)
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		/* There can be no concurrent accesses, since we have no
		 * pending encrypt operations
		 */
		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return copied > 0 ? copied : ret;
}

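/* Common worker for the sendpage paths: append the given page to the open
 * record and push the record out when it is full, on EOR, or when the
 * plaintext message runs out of sg slots.
 */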
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
			      int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct sk_msg *msg_pl;
	struct tls_rec *rec;
	int num_async = 0;
	ssize_t copied = 0;
	bool full_record;
	int record_room;
	int ret = 0;
	bool eor;

	eor = !(flags & MSG_SENDPAGE_NOTLAST);
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		msg_pl = &rec->msg_plaintext;

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + copy + prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due to the
			 * max sg elements limit.
			 */
			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}

		sk_msg_page_add(msg_pl, page, copy, offset);
		sk_mem_charge(sk, copy);

		offset += copy;
		size -= copy;
		copied += copy;

		tls_ctx->pending_open_record_frags = true;
		if (full_record || eor || sk_msg_full(msg_pl)) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied, flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto sendpage_end;
				}
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}

		if (ctx->open_rec)
			goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	ret = sk_stream_error(sk, flags, ret);
	return copied > 0 ? copied : ret;
}

int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags)
{
	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
		      MSG_NO_SHARED_FRAGS))
		return -EOPNOTSUPP;

	return tls_sw_do_sendpage(sk, page, offset, size, flags);
}

int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}

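/* Wait until a full, parsed TLS record (or a psock-queued skb) is available
 * to read, honouring non-blocking mode, socket errors, shutdown and signals.
 */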
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
				     bool nonblock, long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			__strp_unpause(&ctx->strp);
			if (ctx->recv_pkt)
				return ctx->recv_pkt;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if (nonblock || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo,
			      ctx->recv_pkt != skb ||
			      !sk_psock_queue_empty(psock),
			      &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001340static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
1341 int length, int *pages_used,
1342 unsigned int *size_used,
1343 struct scatterlist *to,
1344 int to_max_pages)
1345{
1346 int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1347 struct page *pages[MAX_SKB_FRAGS];
1348 unsigned int size = *size_used;
1349 ssize_t copied, use;
1350 size_t offset;
1351
1352 while (length > 0) {
1353 i = 0;
1354 maxpages = to_max_pages - num_elem;
1355 if (maxpages == 0) {
1356 rc = -EFAULT;
1357 goto out;
1358 }
1359 copied = iov_iter_get_pages(from, pages,
1360 length,
1361 maxpages, &offset);
1362 if (copied <= 0) {
1363 rc = -EFAULT;
1364 goto out;
1365 }
1366
1367 iov_iter_advance(from, copied);
1368
1369 length -= copied;
1370 size += copied;
1371 while (copied) {
1372 use = min_t(int, copied, PAGE_SIZE - offset);
1373
1374 sg_set_page(&to[num_elem],
1375 pages[i], use, offset);
1376 sg_unmark_end(&to[num_elem]);
1377 /* We do not uncharge memory from this API */
1378
1379 offset = 0;
1380 copied -= use;
1381
1382 i++;
1383 num_elem++;
1384 }
1385 }
1386 /* Mark the end in the last sg entry if newly added */
1387 if (num_elem > *pages_used)
1388 sg_mark_end(&to[num_elem - 1]);
1389out:
1390 if (rc)
1391 iov_iter_revert(from, size - *size_used);
1392 *size_used = size;
1393 *pages_used = num_elem;
1394
1395 return rc;
1396}
1397
Vakul Garg0b243d02018-08-10 20:46:41 +05301398/* This function decrypts the input skb into either out_iov, out_sg or the
1399 * skb buffers themselves. The input parameter 'zc' indicates whether
1400 * zero-copy mode should be tried. In zero-copy mode, either out_iov or
1401 * out_sg must be non-NULL. If both out_iov and out_sg are NULL, the
1402 * decryption happens in place inside the skb buffers, i.e. zero-copy is
1403 * disabled and 'zc' is updated to reflect that.
1404 */
1405
1406static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1407 struct iov_iter *out_iov,
1408 struct scatterlist *out_sg,
Vakul Garg692d7b52019-01-16 10:40:16 +00001409 int *chunk, bool *zc, bool async)
Vakul Garg0b243d02018-08-10 20:46:41 +05301410{
1411 struct tls_context *tls_ctx = tls_get_ctx(sk);
1412 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001413 struct tls_prot_info *prot = &tls_ctx->prot_info;
Vakul Garg0b243d02018-08-10 20:46:41 +05301414 struct strp_msg *rxm = strp_msg(skb);
1415 int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
1416 struct aead_request *aead_req;
1417 struct sk_buff *unused;
1418 u8 *aad, *iv, *mem = NULL;
1419 struct scatterlist *sgin = NULL;
1420 struct scatterlist *sgout = NULL;
Vakul Garg4509de12019-02-14 07:11:35 +00001421 const int data_len = rxm->full_len - prot->overhead_size +
1422 prot->tail_size;
Vakul Gargf295b3a2019-03-20 02:03:36 +00001423 int iv_offset = 0;
Vakul Garg0b243d02018-08-10 20:46:41 +05301424
1425 if (*zc && (out_iov || out_sg)) {
1426 if (out_iov)
1427 n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
1428 else
1429 n_sgout = sg_nents(out_sg);
Vakul Garg4509de12019-02-14 07:11:35 +00001430 n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1431 rxm->full_len - prot->prepend_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301432 } else {
1433 n_sgout = 0;
1434 *zc = false;
Doron Roberts-Kedes0927f712018-08-28 16:33:57 -07001435 n_sgin = skb_cow_data(skb, 0, &unused);
Vakul Garg0b243d02018-08-10 20:46:41 +05301436 }
1437
Vakul Garg0b243d02018-08-10 20:46:41 +05301438 if (n_sgin < 1)
1439 return -EBADMSG;
1440
1441 /* Increment to accommodate AAD */
1442 n_sgin = n_sgin + 1;
1443
1444 nsg = n_sgin + n_sgout;
1445
1446 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1447 mem_size = aead_size + (nsg * sizeof(struct scatterlist));
Vakul Garg4509de12019-02-14 07:11:35 +00001448 mem_size = mem_size + prot->aad_size;
Vakul Garg0b243d02018-08-10 20:46:41 +05301449 mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
1450
1451 /* Allocate a single block of memory which contains
1452 * aead_req || sgin[] || sgout[] || aad || iv.
1453 * This order achieves correct alignment for aead_req, sgin, sgout.
1454 */
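	/* Editorial example (not from the original source): for TLS 1.2
	 * AES-GCM-128 with n_sgin = 2 and n_sgout = 0 (the in-place path,
	 * where sgout ends up aliasing sgin), the single allocation is
	 *
	 *	mem_size = aead_size
	 *		 + 2 * sizeof(struct scatterlist)
	 *		 + 13	(prot->aad_size == TLS_AAD_SPACE_SIZE)
	 *		 + 12	(crypto_aead_ivsize() of gcm(aes))
	 */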
1455 mem = kmalloc(mem_size, sk->sk_allocation);
1456 if (!mem)
1457 return -ENOMEM;
1458
1459 /* Segment the allocated memory */
1460 aead_req = (struct aead_request *)mem;
1461 sgin = (struct scatterlist *)(mem + aead_size);
1462 sgout = sgin + n_sgin;
1463 aad = (u8 *)(sgout + n_sgout);
Vakul Garg4509de12019-02-14 07:11:35 +00001464 iv = aad + prot->aad_size;
Vakul Garg0b243d02018-08-10 20:46:41 +05301465
Tianjia Zhang128cfb82021-09-28 14:28:43 +08001466	/* For CCM-based ciphers, the first byte of nonce+iv is a constant */
1467 switch (prot->cipher_type) {
1468 case TLS_CIPHER_AES_CCM_128:
1469 iv[0] = TLS_AES_CCM_IV_B0_BYTE;
Vakul Gargf295b3a2019-03-20 02:03:36 +00001470 iv_offset = 1;
Tianjia Zhang128cfb82021-09-28 14:28:43 +08001471 break;
1472 case TLS_CIPHER_SM4_CCM:
1473 iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
1474 iv_offset = 1;
1475 break;
Vakul Gargf295b3a2019-03-20 02:03:36 +00001476 }
1477
Vakul Garg0b243d02018-08-10 20:46:41 +05301478 /* Prepare IV */
1479 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
Vakul Gargf295b3a2019-03-20 02:03:36 +00001480 iv + iv_offset + prot->salt_size,
Vakul Garg4509de12019-02-14 07:11:35 +00001481 prot->iv_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301482 if (err < 0) {
1483 kfree(mem);
1484 return err;
1485 }
Vadim Fedorenkoa6acbe62020-11-24 18:24:48 +03001486 if (prot->version == TLS_1_3_VERSION ||
1487 prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305)
Vakul Gargf295b3a2019-03-20 02:03:36 +00001488 memcpy(iv + iv_offset, tls_ctx->rx.iv,
1489 crypto_aead_ivsize(ctx->aead_recv));
Dave Watson130b3922019-01-30 21:58:31 +00001490 else
Vakul Gargf295b3a2019-03-20 02:03:36 +00001491 memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
Dave Watson130b3922019-01-30 21:58:31 +00001492
Vadim Fedorenko6942a282020-11-24 18:24:46 +03001493 xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq);
Vakul Garg0b243d02018-08-10 20:46:41 +05301494
1495 /* Prepare AAD */
Vakul Garg4509de12019-02-14 07:11:35 +00001496 tls_make_aad(aad, rxm->full_len - prot->overhead_size +
1497 prot->tail_size,
Vadim Fedorenko6942a282020-11-24 18:24:46 +03001498 tls_ctx->rx.rec_seq, ctx->control, prot);
Vakul Garg0b243d02018-08-10 20:46:41 +05301499
1500 /* Prepare sgin */
1501 sg_init_table(sgin, n_sgin);
Vakul Garg4509de12019-02-14 07:11:35 +00001502 sg_set_buf(&sgin[0], aad, prot->aad_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301503 err = skb_to_sgvec(skb, &sgin[1],
Vakul Garg4509de12019-02-14 07:11:35 +00001504 rxm->offset + prot->prepend_size,
1505 rxm->full_len - prot->prepend_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301506 if (err < 0) {
1507 kfree(mem);
1508 return err;
1509 }
1510
1511 if (n_sgout) {
1512 if (out_iov) {
1513 sg_init_table(sgout, n_sgout);
Vakul Garg4509de12019-02-14 07:11:35 +00001514 sg_set_buf(&sgout[0], aad, prot->aad_size);
Vakul Garg0b243d02018-08-10 20:46:41 +05301515
1516 *chunk = 0;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001517 err = tls_setup_from_iter(sk, out_iov, data_len,
1518 &pages, chunk, &sgout[1],
1519 (n_sgout - 1));
Vakul Garg0b243d02018-08-10 20:46:41 +05301520 if (err < 0)
1521 goto fallback_to_reg_recv;
1522 } else if (out_sg) {
1523 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1524 } else {
1525 goto fallback_to_reg_recv;
1526 }
1527 } else {
1528fallback_to_reg_recv:
1529 sgout = sgin;
1530 pages = 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001531 *chunk = data_len;
Vakul Garg0b243d02018-08-10 20:46:41 +05301532 *zc = false;
1533 }
1534
1535 /* Prepare and submit AEAD request */
Vakul Garg94524d82018-08-29 15:26:55 +05301536 err = tls_do_decryption(sk, skb, sgin, sgout, iv,
Vakul Garg692d7b52019-01-16 10:40:16 +00001537 data_len, aead_req, async);
Vakul Garg94524d82018-08-29 15:26:55 +05301538 if (err == -EINPROGRESS)
1539 return err;
Vakul Garg0b243d02018-08-10 20:46:41 +05301540
1541 /* Release the pages in case iov was mapped to pages */
1542 for (; pages > 0; pages--)
1543 put_page(sg_page(&sgout[pages]));
1544
1545 kfree(mem);
1546 return err;
1547}
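
/* Editorial sketch (not from the original source): the three ways
 * decrypt_internal() can be driven, assuming sk and skb are already set up:
 *
 *	bool zc = true;
 *	int chunk = 0, err;
 *
 *	// 1) zero-copy into a user iov (tls_sw_recvmsg() path):
 *	err = decrypt_internal(sk, skb, &msg->msg_iter, NULL, &chunk, &zc, async);
 *
 *	// 2) zero-copy into a caller-provided scatterlist (decrypt_skb()):
 *	err = decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
 *
 *	// 3) in-place decryption inside the skb; 'zc' ends up false:
 *	zc = false;
 *	err = decrypt_internal(sk, skb, NULL, NULL, &chunk, &zc, false);
 */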
1548
Boris Pismennydafb67f2018-07-13 14:33:40 +03001549static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
Vakul Garg692d7b52019-01-16 10:40:16 +00001550 struct iov_iter *dest, int *chunk, bool *zc,
1551 bool async)
Boris Pismennydafb67f2018-07-13 14:33:40 +03001552{
1553 struct tls_context *tls_ctx = tls_get_ctx(sk);
1554 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001555 struct tls_prot_info *prot = &tls_ctx->prot_info;
Boris Pismennydafb67f2018-07-13 14:33:40 +03001556 struct strp_msg *rxm = strp_msg(skb);
Jakub Kicinskib53f4972019-05-09 16:14:07 -07001557 int pad, err = 0;
Boris Pismennydafb67f2018-07-13 14:33:40 +03001558
Boris Pismenny4799ac82018-07-13 14:33:43 +03001559 if (!ctx->decrypted) {
Jakub Kicinskib9d8fec2019-06-03 15:17:01 -07001560 if (tls_ctx->rx_conf == TLS_HW) {
Jakub Kicinski4de30a82019-10-06 21:09:30 -07001561 err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
Jakub Kicinskib9d8fec2019-06-03 15:17:01 -07001562 if (err < 0)
1563 return err;
1564 }
Jakub Kicinskibe2fbc12019-09-02 21:31:05 -07001565
Boris Pismennyd069b782019-02-27 17:38:06 +02001566 /* Still not decrypted after tls_device */
1567 if (!ctx->decrypted) {
1568 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
1569 async);
1570 if (err < 0) {
1571 if (err == -EINPROGRESS)
Jakub Kicinskifb0f8862019-06-03 15:17:05 -07001572 tls_advance_record_sn(sk, prot,
1573 &tls_ctx->rx);
Jakub Kicinski5c5d22a2020-01-10 04:36:55 -08001574 else if (err == -EBADMSG)
1575 TLS_INC_STATS(sock_net(sk),
1576 LINUX_MIB_TLSDECRYPTERROR);
Boris Pismennyd069b782019-02-27 17:38:06 +02001577 return err;
1578 }
Jakub Kicinskic43ac972019-03-28 14:54:43 -07001579 } else {
1580 *zc = false;
Vakul Garg94524d82018-08-29 15:26:55 +05301581 }
Dave Watson130b3922019-01-30 21:58:31 +00001582
Jakub Kicinskib53f4972019-05-09 16:14:07 -07001583 pad = padding_length(ctx, prot, skb);
1584 if (pad < 0)
1585 return pad;
1586
1587 rxm->full_len -= pad;
Vakul Garg4509de12019-02-14 07:11:35 +00001588 rxm->offset += prot->prepend_size;
1589 rxm->full_len -= prot->overhead_size;
Jakub Kicinskifb0f8862019-06-03 15:17:05 -07001590 tls_advance_record_sn(sk, prot, &tls_ctx->rx);
Jakub Kicinskibc76e5b2019-10-06 21:09:32 -07001591 ctx->decrypted = 1;
Dave Watsonfedf2012019-01-30 21:58:24 +00001592 ctx->saved_data_ready(sk);
Boris Pismenny4799ac82018-07-13 14:33:43 +03001593 } else {
1594 *zc = false;
1595 }
Boris Pismennydafb67f2018-07-13 14:33:40 +03001596
Boris Pismennydafb67f2018-07-13 14:33:40 +03001597 return err;
1598}
1599
1600int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1601 struct scatterlist *sgout)
Dave Watsonc46234e2018-03-22 10:10:35 -07001602{
Vakul Garg0b243d02018-08-10 20:46:41 +05301603 bool zc = true;
1604 int chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001605
Vakul Garg692d7b52019-01-16 10:40:16 +00001606 return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
Dave Watsonc46234e2018-03-22 10:10:35 -07001607}
1608
1609static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
1610 unsigned int len)
1611{
1612 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001613 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001614
Vakul Garg94524d82018-08-29 15:26:55 +05301615 if (skb) {
1616 struct strp_msg *rxm = strp_msg(skb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001617
Vakul Garg94524d82018-08-29 15:26:55 +05301618 if (len < rxm->full_len) {
1619 rxm->offset += len;
1620 rxm->full_len -= len;
1621 return false;
1622 }
Vakul Garga88c26f2019-03-21 11:59:57 +00001623 consume_skb(skb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001624 }
1625
1626 /* Finished with message */
1627 ctx->recv_pkt = NULL;
Doron Roberts-Kedes7170e602018-06-06 09:33:28 -07001628 __strp_unpause(&ctx->strp);
Dave Watsonc46234e2018-03-22 10:10:35 -07001629
1630 return true;
1631}
1632
Vakul Garg692d7b52019-01-16 10:40:16 +00001633/* This function traverses the rx_list in the tls receive context and copies
Vakul Garg2b794c42019-02-23 08:42:37 +00001634 * the decrypted records into the buffer provided by the caller when zero
Vakul Garg692d7b52019-01-16 10:40:16 +00001635 * copy is not true. Further, a record is removed from the rx_list if it is
1636 * not a peek case and the record has been consumed completely.
1637 */
1638static int process_rx_list(struct tls_sw_context_rx *ctx,
1639 struct msghdr *msg,
Vakul Garg2b794c42019-02-23 08:42:37 +00001640 u8 *control,
1641 bool *cmsg,
Vakul Garg692d7b52019-01-16 10:40:16 +00001642 size_t skip,
1643 size_t len,
1644 bool zc,
1645 bool is_peek)
1646{
1647 struct sk_buff *skb = skb_peek(&ctx->rx_list);
Vakul Garg2b794c42019-02-23 08:42:37 +00001648 u8 ctrl = *control;
1649 u8 msgc = *cmsg;
1650 struct tls_msg *tlm;
Vakul Garg692d7b52019-01-16 10:40:16 +00001651 ssize_t copied = 0;
1652
Vakul Garg2b794c42019-02-23 08:42:37 +00001653 /* Set the record type in 'control' if caller didn't pass it */
1654 if (!ctrl && skb) {
1655 tlm = tls_msg(skb);
1656 ctrl = tlm->control;
1657 }
1658
Vakul Garg692d7b52019-01-16 10:40:16 +00001659 while (skip && skb) {
1660 struct strp_msg *rxm = strp_msg(skb);
Vakul Garg2b794c42019-02-23 08:42:37 +00001661 tlm = tls_msg(skb);
1662
1663 /* Cannot process a record of different type */
1664 if (ctrl != tlm->control)
1665 return 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001666
1667 if (skip < rxm->full_len)
1668 break;
1669
1670 skip = skip - rxm->full_len;
1671 skb = skb_peek_next(skb, &ctx->rx_list);
1672 }
1673
1674 while (len && skb) {
1675 struct sk_buff *next_skb;
1676 struct strp_msg *rxm = strp_msg(skb);
1677 int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1678
Vakul Garg2b794c42019-02-23 08:42:37 +00001679 tlm = tls_msg(skb);
1680
1681 /* Cannot process a record of different type */
1682 if (ctrl != tlm->control)
1683 return 0;
1684
1685 /* Set record type if not already done. For a non-data record,
1686 * do not proceed if record type could not be copied.
1687 */
1688 if (!msgc) {
1689 int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1690 sizeof(ctrl), &ctrl);
1691 msgc = true;
1692 if (ctrl != TLS_RECORD_TYPE_DATA) {
1693 if (cerr || msg->msg_flags & MSG_CTRUNC)
1694 return -EIO;
1695
1696 *cmsg = msgc;
1697 }
1698 }
1699
Vakul Garg692d7b52019-01-16 10:40:16 +00001700 if (!zc || (rxm->full_len - skip) > len) {
1701 int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1702 msg, chunk);
1703 if (err < 0)
1704 return err;
1705 }
1706
1707 len = len - chunk;
1708 copied = copied + chunk;
1709
1710		/* Consume the data from the record in the non-peek case */
1711 if (!is_peek) {
1712 rxm->offset = rxm->offset + chunk;
1713 rxm->full_len = rxm->full_len - chunk;
1714
1715 /* Return if there is unconsumed data in the record */
1716 if (rxm->full_len - skip)
1717 break;
1718 }
1719
1720 /* The remaining skip-bytes must lie in 1st record in rx_list.
1721 * So from the 2nd record, 'skip' should be 0.
1722 */
1723 skip = 0;
1724
1725 if (msg)
1726 msg->msg_flags |= MSG_EOR;
1727
1728 next_skb = skb_peek_next(skb, &ctx->rx_list);
1729
1730 if (!is_peek) {
1731 skb_unlink(skb, &ctx->rx_list);
Vakul Garga88c26f2019-03-21 11:59:57 +00001732 consume_skb(skb);
Vakul Garg692d7b52019-01-16 10:40:16 +00001733 }
1734
1735 skb = next_skb;
1736 }
1737
Vakul Garg2b794c42019-02-23 08:42:37 +00001738 *control = ctrl;
Vakul Garg692d7b52019-01-16 10:40:16 +00001739 return copied;
1740}
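
/* Editorial example (not from the original source): if rx_list holds two
 * fully decrypted data records of 10 and 20 bytes, then
 *
 *	process_rx_list(ctx, msg, &control, &cmsg, 0, 25, false, true);
 *
 * (is_peek == true) copies 10 bytes from the first record and 15 from the
 * second, returns 25, and leaves both records queued. With is_peek == false
 * the first record would be unlinked and freed, and the second advanced by
 * 15 bytes so a later call resumes at the unread tail.
 */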
1741
Dave Watsonc46234e2018-03-22 10:10:35 -07001742int tls_sw_recvmsg(struct sock *sk,
1743 struct msghdr *msg,
1744 size_t len,
1745 int nonblock,
1746 int flags,
1747 int *addr_len)
1748{
1749 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001750 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00001751 struct tls_prot_info *prot = &tls_ctx->prot_info;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001752 struct sk_psock *psock;
Vakul Garg692d7b52019-01-16 10:40:16 +00001753 unsigned char control = 0;
1754 ssize_t decrypted = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001755 struct strp_msg *rxm;
Vakul Garg2b794c42019-02-23 08:42:37 +00001756 struct tls_msg *tlm;
Dave Watsonc46234e2018-03-22 10:10:35 -07001757 struct sk_buff *skb;
1758 ssize_t copied = 0;
1759 bool cmsg = false;
Daniel Borkmann06030db2018-06-15 03:07:46 +02001760 int target, err = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07001761 long timeo;
David Howells00e23702018-10-22 13:07:28 +01001762 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
Vakul Garg692d7b52019-01-16 10:40:16 +00001763 bool is_peek = flags & MSG_PEEK;
John Fastabende91de6a2020-05-29 16:06:59 -07001764 bool bpf_strp_enabled;
Vakul Garg94524d82018-08-29 15:26:55 +05301765 int num_async = 0;
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05301766 int pending;
Dave Watsonc46234e2018-03-22 10:10:35 -07001767
1768 flags |= nonblock;
1769
1770 if (unlikely(flags & MSG_ERRQUEUE))
1771 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1772
John Fastabendd3b18ad32018-10-13 02:46:01 +02001773 psock = sk_psock_get(sk);
Dave Watsonc46234e2018-03-22 10:10:35 -07001774 lock_sock(sk);
John Fastabende91de6a2020-05-29 16:06:59 -07001775 bpf_strp_enabled = sk_psock_strp_enabled(psock);
Dave Watsonc46234e2018-03-22 10:10:35 -07001776
Vakul Garg692d7b52019-01-16 10:40:16 +00001777	/* Process pending decrypted records. They must be copied (non-zero-copy) */
Vakul Garg2b794c42019-02-23 08:42:37 +00001778 err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
1779 is_peek);
Vakul Garg692d7b52019-01-16 10:40:16 +00001780 if (err < 0) {
1781 tls_err_abort(sk, err);
1782 goto end;
1783 } else {
1784 copied = err;
1785 }
1786
Jakub Kicinski46a16952019-05-24 10:34:30 -07001787 if (len <= copied)
Vakul Garg692d7b52019-01-16 10:40:16 +00001788 goto recv_end;
Jakub Kicinski46a16952019-05-24 10:34:30 -07001789
1790 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1791 len = len - copied;
1792 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
Vakul Garg692d7b52019-01-16 10:40:16 +00001793
Jakub Kicinski04b25a52019-05-24 10:34:32 -07001794 while (len && (decrypted + copied < target || ctx->recv_pkt)) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001795 bool retain_skb = false;
Vakul Garg692d7b52019-01-16 10:40:16 +00001796 bool zc = false;
1797 int to_decrypt;
Dave Watsonc46234e2018-03-22 10:10:35 -07001798 int chunk = 0;
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001799 bool async_capable;
1800 bool async = false;
Dave Watsonc46234e2018-03-22 10:10:35 -07001801
Jim Ma974271e2021-05-14 11:11:02 +08001802 skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001803 if (!skb) {
1804 if (psock) {
Cong Wang2bc793e2021-03-30 19:32:33 -07001805 int ret = sk_msg_recvmsg(sk, psock, msg, len,
1806 flags);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001807
1808 if (ret > 0) {
Vakul Garg692d7b52019-01-16 10:40:16 +00001809 decrypted += ret;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001810 len -= ret;
1811 continue;
1812 }
1813 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001814 goto recv_end;
Vakul Garg2b794c42019-02-23 08:42:37 +00001815 } else {
1816 tlm = tls_msg(skb);
1817 if (prot->version == TLS_1_3_VERSION)
1818 tlm->control = 0;
1819 else
1820 tlm->control = ctx->control;
John Fastabendd3b18ad32018-10-13 02:46:01 +02001821 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001822
1823 rxm = strp_msg(skb);
Vakul Garg94524d82018-08-29 15:26:55 +05301824
Vakul Garg4509de12019-02-14 07:11:35 +00001825 to_decrypt = rxm->full_len - prot->overhead_size;
Dave Watsonfedf2012019-01-30 21:58:24 +00001826
1827 if (to_decrypt <= len && !is_kvec && !is_peek &&
Dave Watson130b3922019-01-30 21:58:31 +00001828 ctx->control == TLS_RECORD_TYPE_DATA &&
John Fastabende91de6a2020-05-29 16:06:59 -07001829 prot->version != TLS_1_3_VERSION &&
1830 !bpf_strp_enabled)
Dave Watsonfedf2012019-01-30 21:58:24 +00001831 zc = true;
1832
Vakul Gargc0ab4732019-02-11 11:31:05 +00001833 /* Do not use async mode if record is non-data */
John Fastabende91de6a2020-05-29 16:06:59 -07001834 if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001835 async_capable = ctx->async_capable;
Vakul Gargc0ab4732019-02-11 11:31:05 +00001836 else
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001837 async_capable = false;
Vakul Gargc0ab4732019-02-11 11:31:05 +00001838
Dave Watsonfedf2012019-01-30 21:58:24 +00001839 err = decrypt_skb_update(sk, skb, &msg->msg_iter,
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001840 &chunk, &zc, async_capable);
Dave Watsonfedf2012019-01-30 21:58:24 +00001841 if (err < 0 && err != -EINPROGRESS) {
1842 tls_err_abort(sk, EBADMSG);
1843 goto recv_end;
1844 }
1845
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001846 if (err == -EINPROGRESS) {
1847 async = true;
Dave Watsonfedf2012019-01-30 21:58:24 +00001848 num_async++;
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001849 } else if (prot->version == TLS_1_3_VERSION) {
Vakul Garg2b794c42019-02-23 08:42:37 +00001850 tlm->control = ctx->control;
Eran Ben Elisha7754bd62019-02-27 17:38:05 +02001851 }
Vakul Garg2b794c42019-02-23 08:42:37 +00001852
1853		/* If the type of records being processed is not known yet,
1854		 * set it to the type of the record just dequeued. If it is
1855		 * already known but does not match the just-dequeued record,
1856		 * go to end. We always have the record type here: for tls1.2 it
1857		 * is known as soon as the record is dequeued from the stream
1858		 * parser; for tls1.3 it is known because async is disabled.
1859 */
1860
1861 if (!control)
1862 control = tlm->control;
1863 else if (control != tlm->control)
1864 goto recv_end;
Dave Watsonfedf2012019-01-30 21:58:24 +00001865
Dave Watsonc46234e2018-03-22 10:10:35 -07001866 if (!cmsg) {
1867 int cerr;
1868
1869 cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
Vakul Garg2b794c42019-02-23 08:42:37 +00001870 sizeof(control), &control);
Dave Watsonc46234e2018-03-22 10:10:35 -07001871 cmsg = true;
Vakul Garg2b794c42019-02-23 08:42:37 +00001872 if (control != TLS_RECORD_TYPE_DATA) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001873 if (cerr || msg->msg_flags & MSG_CTRUNC) {
1874 err = -EIO;
1875 goto recv_end;
1876 }
1877 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001878 }
1879
Vakul Gargc0ab4732019-02-11 11:31:05 +00001880 if (async)
1881 goto pick_next_record;
1882
Dave Watsonfedf2012019-01-30 21:58:24 +00001883 if (!zc) {
John Fastabende91de6a2020-05-29 16:06:59 -07001884 if (bpf_strp_enabled) {
1885 err = sk_psock_tls_strp_read(psock, skb);
1886 if (err != __SK_PASS) {
1887 rxm->offset = rxm->offset + rxm->full_len;
1888 rxm->full_len = 0;
1889 if (err == __SK_DROP)
1890 consume_skb(skb);
1891 ctx->recv_pkt = NULL;
1892 __strp_unpause(&ctx->strp);
1893 continue;
1894 }
1895 }
1896
Dave Watsonfedf2012019-01-30 21:58:24 +00001897 if (rxm->full_len > len) {
1898 retain_skb = true;
1899 chunk = len;
1900 } else {
1901 chunk = rxm->full_len;
1902 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001903
Dave Watsonfedf2012019-01-30 21:58:24 +00001904 err = skb_copy_datagram_msg(skb, rxm->offset,
1905 msg, chunk);
1906 if (err < 0)
1907 goto recv_end;
Dave Watsonc46234e2018-03-22 10:10:35 -07001908
Dave Watsonfedf2012019-01-30 21:58:24 +00001909 if (!is_peek) {
1910 rxm->offset = rxm->offset + chunk;
1911 rxm->full_len = rxm->full_len - chunk;
Vakul Garg692d7b52019-01-16 10:40:16 +00001912 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001913 }
1914
Vakul Garg94524d82018-08-29 15:26:55 +05301915pick_next_record:
Vakul Garg692d7b52019-01-16 10:40:16 +00001916 if (chunk > len)
1917 chunk = len;
1918
1919 decrypted += chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001920 len -= chunk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001921
Vakul Garg692d7b52019-01-16 10:40:16 +00001922 /* For async or peek case, queue the current skb */
1923 if (async || is_peek || retain_skb) {
1924 skb_queue_tail(&ctx->rx_list, skb);
1925 skb = NULL;
1926 }
Vakul Garg94524d82018-08-29 15:26:55 +05301927
Vakul Garg692d7b52019-01-16 10:40:16 +00001928 if (tls_sw_advance_skb(sk, skb, chunk)) {
1929 /* Return full control message to
1930 * userspace before trying to parse
1931 * another message type
Daniel Borkmann50c6b582018-09-14 23:00:55 +02001932 */
Vakul Garg692d7b52019-01-16 10:40:16 +00001933 msg->msg_flags |= MSG_EOR;
Vadim Fedorenko3fe16ed2020-11-15 07:16:00 +03001934 if (control != TLS_RECORD_TYPE_DATA)
Vakul Garg692d7b52019-01-16 10:40:16 +00001935 goto recv_end;
1936 } else {
Daniel Borkmann50c6b582018-09-14 23:00:55 +02001937 break;
Dave Watsonc46234e2018-03-22 10:10:35 -07001938 }
Jakub Kicinski04b25a52019-05-24 10:34:32 -07001939 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001940
1941recv_end:
Vakul Garg94524d82018-08-29 15:26:55 +05301942 if (num_async) {
1943 /* Wait for all previously submitted records to be decrypted */
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05301944 spin_lock_bh(&ctx->decrypt_compl_lock);
1945 ctx->async_notify = true;
1946 pending = atomic_read(&ctx->decrypt_pending);
1947 spin_unlock_bh(&ctx->decrypt_compl_lock);
1948 if (pending) {
Vakul Garg94524d82018-08-29 15:26:55 +05301949 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1950 if (err) {
1951 /* one of async decrypt failed */
1952 tls_err_abort(sk, err);
1953 copied = 0;
Vakul Garg692d7b52019-01-16 10:40:16 +00001954 decrypted = 0;
1955 goto end;
Vakul Garg94524d82018-08-29 15:26:55 +05301956 }
1957 } else {
1958 reinit_completion(&ctx->async_wait.completion);
1959 }
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05301960
1961 /* There can be no concurrent accesses, since we have no
1962 * pending decrypt operations
1963 */
Vakul Garg94524d82018-08-29 15:26:55 +05301964 WRITE_ONCE(ctx->async_notify, false);
Vakul Garg692d7b52019-01-16 10:40:16 +00001965
1966 /* Drain records from the rx_list & copy if required */
1967 if (is_peek || is_kvec)
Vakul Garg2b794c42019-02-23 08:42:37 +00001968 err = process_rx_list(ctx, msg, &control, &cmsg, copied,
Vakul Garg692d7b52019-01-16 10:40:16 +00001969 decrypted, false, is_peek);
1970 else
Vakul Garg2b794c42019-02-23 08:42:37 +00001971 err = process_rx_list(ctx, msg, &control, &cmsg, 0,
Vakul Garg692d7b52019-01-16 10:40:16 +00001972 decrypted, true, is_peek);
1973 if (err < 0) {
1974 tls_err_abort(sk, err);
1975 copied = 0;
1976 goto end;
1977 }
Vakul Garg94524d82018-08-29 15:26:55 +05301978 }
1979
Vakul Garg692d7b52019-01-16 10:40:16 +00001980 copied += decrypted;
1981
1982end:
Dave Watsonc46234e2018-03-22 10:10:35 -07001983 release_sock(sk);
John Fastabendd3b18ad32018-10-13 02:46:01 +02001984 if (psock)
1985 sk_psock_put(sk, psock);
Dave Watsonc46234e2018-03-22 10:10:35 -07001986 return copied ? : err;
1987}
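
/* Editorial usage sketch (user space, not part of this file): retrieving
 * the TLS_GET_RECORD_TYPE control message that tls_sw_recvmsg() emits.
 * Assumes <linux/tls.h> and a connected kTLS socket 'fd'; SOL_TLS (282)
 * may need defining with older headers.
 *
 *	char buf[16384];
 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr m = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	ssize_t n = recvmsg(fd, &m, 0);
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&m);
 *
 *	if (n > 0 && c && c->cmsg_level == SOL_TLS &&
 *	    c->cmsg_type == TLS_GET_RECORD_TYPE) {
 *		unsigned char record_type = *CMSG_DATA(c);
 *		// 23 == application_data, 21 == alert, ...
 *	}
 */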
1988
1989ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
1990 struct pipe_inode_info *pipe,
1991 size_t len, unsigned int flags)
1992{
1993 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001994 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001995 struct strp_msg *rxm = NULL;
1996 struct sock *sk = sock->sk;
1997 struct sk_buff *skb;
1998 ssize_t copied = 0;
1999 int err = 0;
2000 long timeo;
2001 int chunk;
Vakul Garg0b243d02018-08-10 20:46:41 +05302002 bool zc = false;
Dave Watsonc46234e2018-03-22 10:10:35 -07002003
2004 lock_sock(sk);
2005
Jim Ma974271e2021-05-14 11:11:02 +08002006 timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
Dave Watsonc46234e2018-03-22 10:10:35 -07002007
Jim Ma974271e2021-05-14 11:11:02 +08002008 skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
Dave Watsonc46234e2018-03-22 10:10:35 -07002009 if (!skb)
2010 goto splice_read_end;
2011
Dave Watsonc46234e2018-03-22 10:10:35 -07002012 if (!ctx->decrypted) {
Vakul Garg692d7b52019-01-16 10:40:16 +00002013 err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
Dave Watsonc46234e2018-03-22 10:10:35 -07002014
Dave Watsonfedf2012019-01-30 21:58:24 +00002015 /* splice does not support reading control messages */
2016 if (ctx->control != TLS_RECORD_TYPE_DATA) {
Valentin Vidic4a5cdc62019-12-05 07:41:18 +01002017 err = -EINVAL;
Dave Watsonfedf2012019-01-30 21:58:24 +00002018 goto splice_read_end;
2019 }
2020
Dave Watsonc46234e2018-03-22 10:10:35 -07002021 if (err < 0) {
2022 tls_err_abort(sk, EBADMSG);
2023 goto splice_read_end;
2024 }
Jakub Kicinskibc76e5b2019-10-06 21:09:32 -07002025 ctx->decrypted = 1;
Dave Watsonc46234e2018-03-22 10:10:35 -07002026 }
2027 rxm = strp_msg(skb);
2028
2029 chunk = min_t(unsigned int, rxm->full_len, len);
2030 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2031 if (copied < 0)
2032 goto splice_read_end;
2033
Jim Mad8654f42021-05-12 17:00:11 +08002034 tls_sw_advance_skb(sk, skb, copied);
Dave Watsonc46234e2018-03-22 10:10:35 -07002035
2036splice_read_end:
2037 release_sock(sk);
2038 return copied ? : err;
2039}
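
/* Editorial usage sketch (user space, not part of this file): splicing
 * decrypted bytes straight into a pipe. Assumes 'fd' is a kTLS-RX socket.
 *
 *	int p[2];
 *
 *	if (pipe(p) == 0) {
 *		ssize_t n = splice(fd, NULL, p[1], NULL, 4096, 0);
 *		// fails with -EINVAL if the next record is not application data
 *	}
 */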
2040
John Fastabend924ad652018-10-13 02:46:00 +02002041bool tls_sw_stream_read(const struct sock *sk)
Dave Watsonc46234e2018-03-22 10:10:35 -07002042{
Dave Watsonc46234e2018-03-22 10:10:35 -07002043 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002044 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002045 bool ingress_empty = true;
2046 struct sk_psock *psock;
Dave Watsonc46234e2018-03-22 10:10:35 -07002047
John Fastabendd3b18ad32018-10-13 02:46:01 +02002048 rcu_read_lock();
2049 psock = sk_psock(sk);
2050 if (psock)
2051 ingress_empty = list_empty(&psock->ingress_msg);
2052 rcu_read_unlock();
Dave Watsonc46234e2018-03-22 10:10:35 -07002053
Jakub Kicinski13aecb12019-07-04 14:50:36 -07002054 return !ingress_empty || ctx->recv_pkt ||
2055 !skb_queue_empty(&ctx->rx_list);
Dave Watsonc46234e2018-03-22 10:10:35 -07002056}
2057
2058static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
2059{
2060 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002061 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Vakul Garg4509de12019-02-14 07:11:35 +00002062 struct tls_prot_info *prot = &tls_ctx->prot_info;
Kees Cook3463e512018-06-25 16:55:05 -07002063 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
Dave Watsonc46234e2018-03-22 10:10:35 -07002064 struct strp_msg *rxm = strp_msg(skb);
2065 size_t cipher_overhead;
2066 size_t data_len = 0;
2067 int ret;
2068
2069 /* Verify that we have a full TLS header, or wait for more data */
Vakul Garg4509de12019-02-14 07:11:35 +00002070 if (rxm->offset + prot->prepend_size > skb->len)
Dave Watsonc46234e2018-03-22 10:10:35 -07002071 return 0;
2072
Kees Cook3463e512018-06-25 16:55:05 -07002073 /* Sanity-check size of on-stack buffer. */
Vakul Garg4509de12019-02-14 07:11:35 +00002074 if (WARN_ON(prot->prepend_size > sizeof(header))) {
Kees Cook3463e512018-06-25 16:55:05 -07002075 ret = -EINVAL;
2076 goto read_failure;
2077 }
2078
Dave Watsonc46234e2018-03-22 10:10:35 -07002079 /* Linearize header to local buffer */
Vakul Garg4509de12019-02-14 07:11:35 +00002080 ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
Dave Watsonc46234e2018-03-22 10:10:35 -07002081
2082 if (ret < 0)
2083 goto read_failure;
2084
2085 ctx->control = header[0];
2086
2087 data_len = ((header[4] & 0xFF) | (header[3] << 8));
2088
Vakul Garg4509de12019-02-14 07:11:35 +00002089 cipher_overhead = prot->tag_size;
Vadim Fedorenkoa6acbe62020-11-24 18:24:48 +03002090 if (prot->version != TLS_1_3_VERSION &&
2091 prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
Vakul Garg4509de12019-02-14 07:11:35 +00002092 cipher_overhead += prot->iv_size;
Dave Watsonc46234e2018-03-22 10:10:35 -07002093
Dave Watson130b3922019-01-30 21:58:31 +00002094 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
Vakul Garg4509de12019-02-14 07:11:35 +00002095 prot->tail_size) {
Dave Watsonc46234e2018-03-22 10:10:35 -07002096 ret = -EMSGSIZE;
2097 goto read_failure;
2098 }
2099 if (data_len < cipher_overhead) {
2100 ret = -EBADMSG;
2101 goto read_failure;
2102 }
2103
Dave Watson130b3922019-01-30 21:58:31 +00002104 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2105 if (header[1] != TLS_1_2_VERSION_MINOR ||
2106 header[2] != TLS_1_2_VERSION_MAJOR) {
Dave Watsonc46234e2018-03-22 10:10:35 -07002107 ret = -EINVAL;
2108 goto read_failure;
2109 }
Jakub Kicinskibe2fbc12019-09-02 21:31:05 -07002110
Jakub Kicinskif953d33b2019-06-10 21:40:02 -07002111 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
Jakub Kicinskife58a5a2019-06-10 21:40:01 -07002112 TCP_SKB_CB(skb)->seq + rxm->offset);
Dave Watsonc46234e2018-03-22 10:10:35 -07002113 return data_len + TLS_HEADER_SIZE;
2114
2115read_failure:
2116 tls_err_abort(strp->sk, ret);
2117
2118 return ret;
2119}
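
/* Editorial worked example (not from the original source): for the record
 * header bytes 17 03 03 01 44,
 *
 *	header[0]    = 0x17       -> ctx->control (application_data)
 *	header[1..2] = 0x03 0x03  -> TLS 1.2 on the wire (also used by TLS 1.3)
 *	header[3..4] = 0x01 0x44  -> data_len = (0x01 << 8) | 0x44 = 324
 *
 * so tls_read_size() returns 324 + TLS_HEADER_SIZE = 329 once that many
 * bytes are available to the strparser.
 */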
2120
2121static void tls_queue(struct strparser *strp, struct sk_buff *skb)
2122{
2123 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002124 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07002125
Jakub Kicinskibc76e5b2019-10-06 21:09:32 -07002126 ctx->decrypted = 0;
Dave Watsonc46234e2018-03-22 10:10:35 -07002127
2128 ctx->recv_pkt = skb;
2129 strp_pause(strp);
2130
Vakul Gargad13acc2018-07-30 16:08:33 +05302131 ctx->saved_data_ready(strp->sk);
Dave Watsonc46234e2018-03-22 10:10:35 -07002132}
2133
2134static void tls_data_ready(struct sock *sk)
2135{
2136 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002137 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002138 struct sk_psock *psock;
Dave Watsonc46234e2018-03-22 10:10:35 -07002139
2140 strp_data_ready(&ctx->strp);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002141
2142 psock = sk_psock_get(sk);
Xiyu Yang62b40112020-04-25 21:10:23 +08002143 if (psock) {
2144 if (!list_empty(&psock->ingress_msg))
2145 ctx->saved_data_ready(sk);
John Fastabendd3b18ad32018-10-13 02:46:01 +02002146 sk_psock_put(sk, psock);
2147 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002148}
2149
John Fastabendf87e62d2019-07-19 10:29:16 -07002150void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2151{
2152 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2153
2154 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2155 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2156 cancel_delayed_work_sync(&ctx->tx_work.work);
2157}
2158
John Fastabend313ab002019-07-19 10:29:17 -07002159void tls_sw_release_resources_tx(struct sock *sk)
Dave Watson3c4d7552017-06-14 11:37:39 -07002160{
2161 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002162 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +05302163 struct tls_rec *rec, *tmp;
Rohit Maheshwari38f7e1c2020-09-24 12:28:45 +05302164 int pending;
Vakul Garga42055e2018-09-21 09:46:13 +05302165
2166 /* Wait for any pending async encryptions to complete */
Rohit Maheshwari38f7e1c2020-09-24 12:28:45 +05302167 spin_lock_bh(&ctx->encrypt_compl_lock);
2168 ctx->async_notify = true;
2169 pending = atomic_read(&ctx->encrypt_pending);
2170 spin_unlock_bh(&ctx->encrypt_compl_lock);
2171
2172 if (pending)
Vakul Garga42055e2018-09-21 09:46:13 +05302173 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2174
Vakul Garga42055e2018-09-21 09:46:13 +05302175 tls_tx_records(sk, -1);
2176
Vakul Garg9932a292018-09-24 15:35:56 +05302177	/* Free up unsent records in tx_list. First, free
Vakul Garga42055e2018-09-21 09:46:13 +05302178	 * the partially sent record, if any, at the head of tx_list.
2179 */
Jakub Kicinskic5daa6c2019-11-27 12:16:44 -08002180 if (tls_ctx->partially_sent_record) {
2181 tls_free_partial_record(sk, tls_ctx);
Vakul Garg9932a292018-09-24 15:35:56 +05302182 rec = list_first_entry(&ctx->tx_list,
Vakul Garga42055e2018-09-21 09:46:13 +05302183 struct tls_rec, list);
2184 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02002185 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +05302186 kfree(rec);
2187 }
2188
Vakul Garg9932a292018-09-24 15:35:56 +05302189 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
Vakul Garga42055e2018-09-21 09:46:13 +05302190 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02002191 sk_msg_free(sk, &rec->msg_encrypted);
2192 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +05302193 kfree(rec);
2194 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002195
Vakul Garg201876b2018-07-24 16:54:27 +05302196 crypto_free_aead(ctx->aead_send);
Vakul Gargc7749732018-09-25 20:21:51 +05302197 tls_free_open_rec(sk);
John Fastabend313ab002019-07-19 10:29:17 -07002198}
2199
2200void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2201{
2202 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002203
2204 kfree(ctx);
2205}
2206
Boris Pismenny39f56e12018-07-13 14:33:41 +03002207void tls_sw_release_resources_rx(struct sock *sk)
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002208{
2209 struct tls_context *tls_ctx = tls_get_ctx(sk);
2210 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2211
Jakub Kicinski12c76862019-04-19 16:52:19 -07002212 kfree(tls_ctx->rx.rec_seq);
2213 kfree(tls_ctx->rx.iv);
2214
Dave Watsonc46234e2018-03-22 10:10:35 -07002215 if (ctx->aead_recv) {
Vakul Garg201876b2018-07-24 16:54:27 +05302216 kfree_skb(ctx->recv_pkt);
2217 ctx->recv_pkt = NULL;
Vakul Garg692d7b52019-01-16 10:40:16 +00002218 skb_queue_purge(&ctx->rx_list);
Dave Watsonc46234e2018-03-22 10:10:35 -07002219 crypto_free_aead(ctx->aead_recv);
2220 strp_stop(&ctx->strp);
John Fastabend313ab002019-07-19 10:29:17 -07002221 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2222 * we still want to strp_stop(), but sk->sk_data_ready was
2223 * never swapped.
2224 */
2225 if (ctx->saved_data_ready) {
2226 write_lock_bh(&sk->sk_callback_lock);
2227 sk->sk_data_ready = ctx->saved_data_ready;
2228 write_unlock_bh(&sk->sk_callback_lock);
2229 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002230 }
Boris Pismenny39f56e12018-07-13 14:33:41 +03002231}
2232
John Fastabend313ab002019-07-19 10:29:17 -07002233void tls_sw_strparser_done(struct tls_context *tls_ctx)
2234{
2235 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2236
2237 strp_done(&ctx->strp);
2238}
2239
2240void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2241{
2242 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2243
2244 kfree(ctx);
2245}
2246
Boris Pismenny39f56e12018-07-13 14:33:41 +03002247void tls_sw_free_resources_rx(struct sock *sk)
2248{
2249 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismenny39f56e12018-07-13 14:33:41 +03002250
2251 tls_sw_release_resources_rx(sk);
John Fastabend313ab002019-07-19 10:29:17 -07002252 tls_sw_free_ctx_rx(tls_ctx);
Dave Watson3c4d7552017-06-14 11:37:39 -07002253}
2254
Vakul Garg9932a292018-09-24 15:35:56 +05302255/* The work handler to transmit the encrypted records in tx_list */
Vakul Garga42055e2018-09-21 09:46:13 +05302256static void tx_work_handler(struct work_struct *work)
2257{
2258 struct delayed_work *delayed_work = to_delayed_work(work);
2259 struct tx_work *tx_work = container_of(delayed_work,
2260 struct tx_work, work);
2261 struct sock *sk = tx_work->sk;
2262 struct tls_context *tls_ctx = tls_get_ctx(sk);
John Fastabendf87e62d2019-07-19 10:29:16 -07002263 struct tls_sw_context_tx *ctx;
2264
2265 if (unlikely(!tls_ctx))
2266 return;
2267
2268 ctx = tls_sw_ctx_tx(tls_ctx);
2269 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2270 return;
Vakul Garga42055e2018-09-21 09:46:13 +05302271
2272 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2273 return;
Jakub Kicinski79ffe602019-11-05 14:24:35 -08002274 mutex_lock(&tls_ctx->tx_lock);
Vakul Garga42055e2018-09-21 09:46:13 +05302275 lock_sock(sk);
2276 tls_tx_records(sk, -1);
2277 release_sock(sk);
Jakub Kicinski79ffe602019-11-05 14:24:35 -08002278 mutex_unlock(&tls_ctx->tx_lock);
Vakul Garga42055e2018-09-21 09:46:13 +05302279}
2280
Boris Pismenny7463d3a2019-02-27 17:38:04 +02002281void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2282{
2283 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2284
2285 /* Schedule the transmission if tx list is ready */
Jakub Kicinski02b1fa02019-11-05 14:24:34 -08002286 if (is_tx_ready(tx_ctx) &&
2287 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2288 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
Boris Pismenny7463d3a2019-02-27 17:38:04 +02002289}
2290
Jakub Kicinski318892a2019-07-19 10:29:14 -07002291void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2292{
2293 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2294
2295 write_lock_bh(&sk->sk_callback_lock);
2296 rx_ctx->saved_data_ready = sk->sk_data_ready;
2297 sk->sk_data_ready = tls_data_ready;
2298 write_unlock_bh(&sk->sk_callback_lock);
2299
2300 strp_check_rcv(&rx_ctx->strp);
2301}
2302
Dave Watsonc46234e2018-03-22 10:10:35 -07002303int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
Dave Watson3c4d7552017-06-14 11:37:39 -07002304{
Vakul Garg4509de12019-02-14 07:11:35 +00002305 struct tls_context *tls_ctx = tls_get_ctx(sk);
2306 struct tls_prot_info *prot = &tls_ctx->prot_info;
Dave Watson3c4d7552017-06-14 11:37:39 -07002307 struct tls_crypto_info *crypto_info;
2308 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002309 struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002310 struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
Vadim Fedorenko74ea6102020-11-24 18:24:49 +03002311 struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002312 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2313 struct tls_sw_context_rx *sw_ctx_rx = NULL;
Dave Watsonc46234e2018-03-22 10:10:35 -07002314 struct cipher_context *cctx;
2315 struct crypto_aead **aead;
2316 struct strp_callbacks cb;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002317 u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
Vakul Garg692d7b52019-01-16 10:40:16 +00002318 struct crypto_tfm *tfm;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002319 char *iv, *rec_seq, *key, *salt, *cipher_name;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002320 size_t keysize;
Dave Watson3c4d7552017-06-14 11:37:39 -07002321 int rc = 0;
2322
2323 if (!ctx) {
2324 rc = -EINVAL;
2325 goto out;
2326 }
2327
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002328 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03002329 if (!ctx->priv_ctx_tx) {
2330 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2331 if (!sw_ctx_tx) {
2332 rc = -ENOMEM;
2333 goto out;
2334 }
2335 ctx->priv_ctx_tx = sw_ctx_tx;
2336 } else {
2337 sw_ctx_tx =
2338 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
Dave Watsonc46234e2018-03-22 10:10:35 -07002339 }
Dave Watsonc46234e2018-03-22 10:10:35 -07002340 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03002341 if (!ctx->priv_ctx_rx) {
2342 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2343 if (!sw_ctx_rx) {
2344 rc = -ENOMEM;
2345 goto out;
2346 }
2347 ctx->priv_ctx_rx = sw_ctx_rx;
2348 } else {
2349 sw_ctx_rx =
2350 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002351 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002352 }
2353
Dave Watsonc46234e2018-03-22 10:10:35 -07002354 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03002355 crypto_init_wait(&sw_ctx_tx->async_wait);
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05302356 spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02002357 crypto_info = &ctx->crypto_send.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07002358 cctx = &ctx->tx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002359 aead = &sw_ctx_tx->aead_send;
Vakul Garg9932a292018-09-24 15:35:56 +05302360 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
Vakul Garga42055e2018-09-21 09:46:13 +05302361 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2362 sw_ctx_tx->tx_work.sk = sk;
Dave Watsonc46234e2018-03-22 10:10:35 -07002363 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03002364 crypto_init_wait(&sw_ctx_rx->async_wait);
Vinay Kumar Yadav0cada332020-05-23 01:40:31 +05302365 spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02002366 crypto_info = &ctx->crypto_recv.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07002367 cctx = &ctx->rx;
Vakul Garg692d7b52019-01-16 10:40:16 +00002368 skb_queue_head_init(&sw_ctx_rx->rx_list);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002369 aead = &sw_ctx_rx->aead_recv;
Dave Watsonc46234e2018-03-22 10:10:35 -07002370 }
2371
Dave Watson3c4d7552017-06-14 11:37:39 -07002372 switch (crypto_info->cipher_type) {
2373 case TLS_CIPHER_AES_GCM_128: {
2374 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2375 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2376 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2377 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
2378 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2379 rec_seq =
2380 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
2381 gcm_128_info =
2382 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
Dave Watsonfb99bce2019-01-30 21:58:05 +00002383 keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2384 key = gcm_128_info->key;
2385 salt = gcm_128_info->salt;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002386 salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2387 cipher_name = "gcm(aes)";
Dave Watsonfb99bce2019-01-30 21:58:05 +00002388 break;
2389 }
2390 case TLS_CIPHER_AES_GCM_256: {
2391 nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2392 tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2393 iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2394 iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
2395 rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2396 rec_seq =
2397 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
2398 gcm_256_info =
2399 (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
2400 keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2401 key = gcm_256_info->key;
2402 salt = gcm_256_info->salt;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002403 salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2404 cipher_name = "gcm(aes)";
2405 break;
2406 }
2407 case TLS_CIPHER_AES_CCM_128: {
2408 nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2409 tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2410 iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2411 iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
2412 rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2413 rec_seq =
2414 ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
2415 ccm_128_info =
2416 (struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
2417 keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2418 key = ccm_128_info->key;
2419 salt = ccm_128_info->salt;
2420 salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2421 cipher_name = "ccm(aes)";
Dave Watson3c4d7552017-06-14 11:37:39 -07002422 break;
2423 }
Vadim Fedorenko74ea6102020-11-24 18:24:49 +03002424 case TLS_CIPHER_CHACHA20_POLY1305: {
2425 chacha20_poly1305_info = (void *)crypto_info;
2426 nonce_size = 0;
2427 tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
2428 iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
2429 iv = chacha20_poly1305_info->iv;
2430 rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
2431 rec_seq = chacha20_poly1305_info->rec_seq;
2432 keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
2433 key = chacha20_poly1305_info->key;
2434 salt = chacha20_poly1305_info->salt;
2435 salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
2436 cipher_name = "rfc7539(chacha20,poly1305)";
2437 break;
2438 }
Tianjia Zhang227b9642021-09-16 11:37:38 +08002439 case TLS_CIPHER_SM4_GCM: {
2440 struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
2441
2442 sm4_gcm_info = (void *)crypto_info;
2443 nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2444 tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
2445 iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2446 iv = sm4_gcm_info->iv;
2447 rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
2448 rec_seq = sm4_gcm_info->rec_seq;
2449 keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
2450 key = sm4_gcm_info->key;
2451 salt = sm4_gcm_info->salt;
2452 salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
2453 cipher_name = "gcm(sm4)";
2454 break;
2455 }
2456 case TLS_CIPHER_SM4_CCM: {
2457 struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
2458
2459 sm4_ccm_info = (void *)crypto_info;
2460 nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2461 tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
2462 iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2463 iv = sm4_ccm_info->iv;
2464 rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
2465 rec_seq = sm4_ccm_info->rec_seq;
2466 keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
2467 key = sm4_ccm_info->key;
2468 salt = sm4_ccm_info->salt;
2469 salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
2470 cipher_name = "ccm(sm4)";
2471 break;
2472 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002473 default:
2474 rc = -EINVAL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002475 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07002476 }
2477
Jakub Kicinski89fec472019-06-10 21:40:00 -07002478 /* Sanity-check the sizes for stack allocations. */
2479 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2480 rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
Kees Cookb16520f2018-04-10 17:52:34 -07002481 rc = -EINVAL;
2482 goto free_priv;
2483 }
2484
Dave Watson130b3922019-01-30 21:58:31 +00002485 if (crypto_info->version == TLS_1_3_VERSION) {
2486 nonce_size = 0;
Vakul Garg4509de12019-02-14 07:11:35 +00002487 prot->aad_size = TLS_HEADER_SIZE;
2488 prot->tail_size = 1;
Dave Watson130b3922019-01-30 21:58:31 +00002489 } else {
Vakul Garg4509de12019-02-14 07:11:35 +00002490 prot->aad_size = TLS_AAD_SPACE_SIZE;
2491 prot->tail_size = 0;
Dave Watson130b3922019-01-30 21:58:31 +00002492 }
2493
Vakul Garg4509de12019-02-14 07:11:35 +00002494 prot->version = crypto_info->version;
2495 prot->cipher_type = crypto_info->cipher_type;
2496 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2497 prot->tag_size = tag_size;
2498 prot->overhead_size = prot->prepend_size +
2499 prot->tag_size + prot->tail_size;
2500 prot->iv_size = iv_size;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002501 prot->salt_size = salt_size;
2502 cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
Dave Watsonc46234e2018-03-22 10:10:35 -07002503 if (!cctx->iv) {
Dave Watson3c4d7552017-06-14 11:37:39 -07002504 rc = -ENOMEM;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002505 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07002506 }
Dave Watsonfb99bce2019-01-30 21:58:05 +00002507 /* Note: 128 & 256 bit salt are the same size */
Vakul Garg4509de12019-02-14 07:11:35 +00002508 prot->rec_seq_size = rec_seq_size;
Vakul Gargf295b3a2019-03-20 02:03:36 +00002509 memcpy(cctx->iv, salt, salt_size);
2510 memcpy(cctx->iv + salt_size, iv, iv_size);
zhong jiang969d5092018-08-01 00:50:24 +08002511 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
Dave Watsonc46234e2018-03-22 10:10:35 -07002512 if (!cctx->rec_seq) {
Dave Watson3c4d7552017-06-14 11:37:39 -07002513 rc = -ENOMEM;
2514 goto free_iv;
2515 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002516
Dave Watsonc46234e2018-03-22 10:10:35 -07002517 if (!*aead) {
Vakul Gargf295b3a2019-03-20 02:03:36 +00002518 *aead = crypto_alloc_aead(cipher_name, 0, 0);
Dave Watsonc46234e2018-03-22 10:10:35 -07002519 if (IS_ERR(*aead)) {
2520 rc = PTR_ERR(*aead);
2521 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002522 goto free_rec_seq;
2523 }
2524 }
2525
2526 ctx->push_pending_record = tls_sw_push_pending_record;
2527
Dave Watsonfb99bce2019-01-30 21:58:05 +00002528 rc = crypto_aead_setkey(*aead, key, keysize);
2529
Dave Watson3c4d7552017-06-14 11:37:39 -07002530 if (rc)
2531 goto free_aead;
2532
Vakul Garg4509de12019-02-14 07:11:35 +00002533 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
Dave Watsonc46234e2018-03-22 10:10:35 -07002534 if (rc)
2535 goto free_aead;
2536
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002537 if (sw_ctx_rx) {
Vakul Garg692d7b52019-01-16 10:40:16 +00002538 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
Vakul Garg8497ded2019-02-09 07:53:28 +00002539
2540 if (crypto_info->version == TLS_1_3_VERSION)
Jakub Kicinski5c5458e2019-10-06 21:09:31 -07002541 sw_ctx_rx->async_capable = 0;
Vakul Garg8497ded2019-02-09 07:53:28 +00002542 else
2543 sw_ctx_rx->async_capable =
Jakub Kicinski5c5458e2019-10-06 21:09:31 -07002544 !!(tfm->__crt_alg->cra_flags &
2545 CRYPTO_ALG_ASYNC);
Vakul Garg692d7b52019-01-16 10:40:16 +00002546
Dave Watsonc46234e2018-03-22 10:10:35 -07002547 /* Set up strparser */
2548 memset(&cb, 0, sizeof(cb));
2549 cb.rcv_msg = tls_queue;
2550 cb.parse_msg = tls_read_size;
2551
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002552 strp_init(&sw_ctx_rx->strp, sk, &cb);
Dave Watsonc46234e2018-03-22 10:10:35 -07002553 }
2554
2555 goto out;
Dave Watson3c4d7552017-06-14 11:37:39 -07002556
2557free_aead:
Dave Watsonc46234e2018-03-22 10:10:35 -07002558 crypto_free_aead(*aead);
2559 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002560free_rec_seq:
Dave Watsonc46234e2018-03-22 10:10:35 -07002561 kfree(cctx->rec_seq);
2562 cctx->rec_seq = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07002563free_iv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002564 kfree(cctx->iv);
2565 cctx->iv = NULL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01002566free_priv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03002567 if (tx) {
2568 kfree(ctx->priv_ctx_tx);
2569 ctx->priv_ctx_tx = NULL;
2570 } else {
2571 kfree(ctx->priv_ctx_rx);
2572 ctx->priv_ctx_rx = NULL;
2573 }
Dave Watson3c4d7552017-06-14 11:37:39 -07002574out:
2575 return rc;
2576}
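
/* Editorial usage sketch (user space, not part of this file): the
 * setsockopt() sequence that ultimately reaches tls_set_sw_offload(),
 * following Documentation/networking/tls.rst. Key material comes from a
 * handshake performed in user space; normally TX and RX use distinct keys.
 *
 *	#include <linux/tls.h>
 *	#include <netinet/tcp.h>
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *
 *	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));	// tx == 1 here
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));	// tx == 0 here
 */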