/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}

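/* For a TLS 1.3 record, scan back from the end of the decrypted payload
 * to find the real content type byte, skipping any trailing zero padding.
 * Returns the number of padding bytes to strip, or a negative error code.
 * Other protocol versions have no padding, so 0 is returned.
 */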
static int padding_length(struct tls_sw_context_rx *ctx,
			  struct tls_prot_info *prot, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		char content_type = 0;
		int err;
		int back = 17;

		while (content_type == 0) {
			if (back > rxm->full_len - prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb,
					    rxm->offset + rxm->full_len - back,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			back++;
		}
		ctx->control = content_type;
	}
	return sub;
}

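/* Completion callback for asynchronous decryption. Propagates crypto
 * errors to the socket, strips the TLS header, padding and authentication
 * tag from the record, frees the out-of-place destination pages, and
 * wakes up a waiter once the last pending decryption has drained.
 */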
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct scatterlist *sgout = aead_req->dst;
	struct scatterlist *sgin = aead_req->src;
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	prot = &tls_ctx->prot_info;

	/* Propagate if there was an err */
	if (err) {
		ctx->async_wait.err = err;
		tls_err_abort(skb->sk, err);
	} else {
		struct strp_msg *rxm = strp_msg(skb);
		int pad;

		pad = padding_length(ctx, prot, skb);
		if (pad < 0) {
			ctx->async_wait.err = pad;
			tls_err_abort(skb->sk, pad);
		} else {
			rxm->full_len -= pad;
			rxm->offset += prot->prepend_size;
			rxm->full_len -= prot->overhead_size;
		}
	}

	/* After using skb->sk to propagate sk through crypto async callback
	 * we need to NULL it again.
	 */
	skb->sk = NULL;

	/* Free the destination pages if skb was not decrypted in place */
	if (sgout != sgin) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	pending = atomic_dec_return(&ctx->decrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);
}

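/* Build and submit the AEAD decryption request for one record. In async
 * mode the request may complete later in tls_decrypt_done(); otherwise
 * this waits for the crypto layer to finish before returning.
 */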
static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (async) {
		/* Using skb->sk to push sk through to crypto async callback
		 * handler. This allows propagating errors up to the socket
		 * if needed. It _must_ be cleared in the async handler
		 * before consume_skb is called. We _know_ skb->sk is NULL
		 * because it is a clone from strparser.
		 */
		skb->sk = sk;
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, skb);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (async)
			return ret;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}

	if (async)
		atomic_dec(&ctx->decrypt_pending);

	return ret;
}

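/* Trim both the plaintext and encrypted halves of the currently open
 * record back to target_size (plus record overhead on the encrypted
 * side), e.g. after a partial copy failed.
 */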
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

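/* Allocate pages so the encrypted half of the open record can hold
 * len bytes in total.
 */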
static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

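/* Share page references between msg_en and msg_pl so the plaintext can be
 * copied in once and encrypted in place: the tail of the encrypted sg,
 * offset past the TLS header, is appended to the plaintext sg.
 */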
static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has the required room (ensured by the caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

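/* Allocate and initialize a new TLS record: the sk_msg pair for plaintext
 * and ciphertext, plus the two-entry AAD scatterlists that get chained
 * onto the data during encryption.
 */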
static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}

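/* Push records from tx_list to the transport in order. A partially sent
 * record at the head is finished first; after that only records whose
 * encryption has completed (tx_ready) are transmitted.
 */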
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	return rc;
}

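/* Completion callback for asynchronous encryption. Restores the
 * scatterlist entry to cover the TLS header again, marks the record as
 * ready for transmission and, if the record sits at the head of tx_list,
 * schedules the tx worker.
 */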
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct sock *sk = req->data;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);
	msg_en = &rec->msg_encrypted;

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if an error was previously set on the socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}

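/* Prepare the per-record nonce, point the data scatterlist past the TLS
 * header and submit the AEAD encryption request. The record is queued on
 * tx_list first; unless encryption fails outright, it is detached from
 * the context and the record sequence number is advanced.
 */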
static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, sk);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from the context unless encryption failed */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}

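/* Split the open record at split_point, used when a BPF verdict applies
 * to only part of the buffered plaintext: everything past the split is
 * moved into a newly allocated record returned via *to, and the original
 * record is shrunk accordingly.
 */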
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}

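/* Undo tls_split_open_record(): merge the split-off record back into the
 * original one, coalescing the boundary entries when they are adjacent in
 * the same page, and free the temporary record.
 */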
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}

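/* Seal and encrypt the currently open record. If a BPF verdict covers
 * only part of the plaintext, the record is split first. For TLS 1.3 the
 * content type byte is chained onto the end of the plaintext; the AAD and
 * TLS header are then filled in and the record handed to
 * tls_do_encryption() and, on success, to tls_tx_records().
 */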
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, uninitialized_var(orig_end);
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message. No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
		 &msg_en->sg.data[i] : &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, prot->rec_seq_size,
		     record_type, prot->version);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type, prot->version);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}

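/* Run the BPF TX verdict program (when a psock is attached) over the open
 * record and act on the result: __SK_PASS pushes the record out on this
 * socket, __SK_REDIRECT hands the plaintext to another socket, and
 * __SK_DROP frees it and fails the send with -EACCES.
 */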
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       size_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy)
		return tls_push_record(sk, flags, record_type);
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		if (delta < msg->sg.size)
			delta -= msg->sg.size;
		else
			delta = 0;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err < 0) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}

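/* sendmsg() for TLS sockets: buffers plaintext into the open record
 * (pulling user pages in zerocopy where the cipher runs synchronously),
 * pushes full records through the BPF TX verdict, and on MSG_MORE leaves
 * a partially filled record open for the next call. May return with
 * encryptions still pending when the cipher is asynchronous.
 */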
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due to the
			 * max sg elements limit.
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			rec->inplace_crypto = 0;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due to the
			 * max sg elements limit.
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		smp_store_mb(ctx->async_notify, true);

		if (atomic_read(&ctx->encrypt_pending))
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return copied ? copied : ret;
}

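/* Common implementation behind sendpage(): appends page fragments to the
 * open record's plaintext without copying and pushes the record when it
 * fills up or the caller did not set MSG_MORE/MSG_SENDPAGE_NOTLAST.
 */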
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
			      int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct sk_msg *msg_pl;
	struct tls_rec *rec;
	int num_async = 0;
	size_t copied = 0;
	bool full_record;
	int record_room;
	int ret = 0;
	bool eor;

	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		msg_pl = &rec->msg_plaintext;

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + copy + prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due to the
			 * max sg elements limit.
			 */
			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}

		sk_msg_page_add(msg_pl, page, copy, offset);
		sk_mem_charge(sk, copy);

		offset += copy;
		size -= copy;
		copied += copy;

		tls_ctx->pending_open_record_frags = true;
		if (full_record || eor || sk_msg_full(msg_pl)) {
			rec->inplace_crypto = 0;
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied, flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto sendpage_end;
				}
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}

		goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	ret = sk_stream_error(sk, flags, ret);
	return copied ? copied : ret;
}

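/* sendpage() entry point: validates the flags, then runs the common
 * sendpage path under the TX mutex and the socket lock.
 */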
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
		return -ENOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}

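/* Wait until a complete record has been parsed by strparser (or data is
 * queued on the psock), honoring socket errors, shutdown, timeouts and
 * signals.
 */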
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
				     int flags, long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo,
			      ctx->recv_pkt != skb ||
			      !sk_psock_queue_empty(psock),
			      &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

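/* Pin user pages from an iov_iter and map them into a scatterlist for
 * zerocopy decryption, advancing the iter and accounting the bytes used.
 */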
static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
			       int length, int *pages_used,
			       unsigned int *size_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = *size_used;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

Vakul Garg0b243d02018-08-10 20:46:41 +05301327/* This function decrypts the input skb into either out_iov or in out_sg
1328 * or in skb buffers itself. The input parameter 'zc' indicates if
1329 * zero-copy mode needs to be tried or not. With zero-copy mode, either
1330 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
1331 * NULL, then the decryption happens inside skb buffers itself, i.e.
1332 * zero-copy gets disabled and 'zc' is updated.
1333 */
1334
1335static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1336 struct iov_iter *out_iov,
1337 struct scatterlist *out_sg,
Vakul Garg692d7b52019-01-16 10:40:16 +00001338 int *chunk, bool *zc, bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = strp_msg(skb);
	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
	struct aead_request *aead_req;
	struct sk_buff *unused;
	u8 *aad, *iv, *mem = NULL;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size +
			     prot->tail_size;
	int iv_offset = 0;

	if (*zc && (out_iov || out_sg)) {
		if (out_iov)
			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
		else
			n_sgout = sg_nents(out_sg);
		n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
				 rxm->full_len - prot->prepend_size);
	} else {
		n_sgout = 0;
		*zc = false;
		n_sgin = skb_cow_data(skb, 0, &unused);
	}

	if (n_sgin < 1)
		return -EBADMSG;

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	nsg = n_sgin + n_sgout;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
	mem_size = mem_size + prot->aad_size;
	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

	/* Allocate a single block of memory which contains
	 * aead_req || sgin[] || sgout[] || aad || iv.
	 * This order achieves correct alignment for aead_req, sgin, sgout.
	 */
	mem = kmalloc(mem_size, sk->sk_allocation);
	if (!mem)
		return -ENOMEM;

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	sgin = (struct scatterlist *)(mem + aead_size);
	sgout = sgin + n_sgin;
	aad = (u8 *)(sgout + n_sgout);
	iv = aad + prot->aad_size;
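	/* The u8 aad and iv buffers deliberately come after the two
	 * scatterlist arrays: they have no alignment requirement of their
	 * own, so placing them last keeps aead_req, sgin and sgout
	 * naturally aligned within the single allocation.
	 */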

	/* For CCM based ciphers, first byte of nonce+iv is always '2' */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		iv[0] = 2;
		iv_offset = 1;
	}
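	/* (Per RFC 3610, iv[0] here is the CCM flags octet L' = L - 1;
	 * TLS uses a 12-byte nonce, leaving L = 3 bytes for the message
	 * length field, hence the constant 2.)
	 */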

	/* Prepare IV */
	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + iv_offset + prot->salt_size,
			    prot->iv_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}
	if (prot->version == TLS_1_3_VERSION)
		memcpy(iv + iv_offset, tls_ctx->rx.iv,
		       crypto_aead_ivsize(ctx->aead_recv));
	else
		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);

	xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);
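	/* iv[] now holds the full AEAD nonce: for TLS 1.2 it is the
	 * implicit salt followed by the explicit nonce read off the wire
	 * above; for TLS 1.3 it is the static IV with the record sequence
	 * number XORed in by xor_iv_with_seq().
	 */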

	/* Prepare AAD */
	tls_make_aad(aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, prot->rec_seq_size,
		     ctx->control, prot->version);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}

	if (n_sgout) {
		if (out_iov) {
			sg_init_table(sgout, n_sgout);
			sg_set_buf(&sgout[0], aad, prot->aad_size);

			*chunk = 0;
			err = tls_setup_from_iter(sk, out_iov, data_len,
						  &pages, chunk, &sgout[1],
						  (n_sgout - 1));
			if (err < 0)
				goto fallback_to_reg_recv;
		} else if (out_sg) {
			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
		} else {
			goto fallback_to_reg_recv;
		}
	} else {
fallback_to_reg_recv:
		sgout = sgin;
		pages = 0;
		*chunk = data_len;
		*zc = false;
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
				data_len, aead_req, async);
	if (err == -EINPROGRESS)
		return err;

	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));

	kfree(mem);
	return err;
}

static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct iov_iter *dest, int *chunk, bool *zc,
			      bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = strp_msg(skb);
	int pad, err = 0;

	if (!ctx->decrypted) {
		if (tls_ctx->rx_conf == TLS_HW) {
			err = tls_device_decrypted(sk, skb);
			if (err < 0)
				return err;
		}

		/* Still not decrypted after tls_device */
		if (!ctx->decrypted) {
			err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
					       async);
			if (err < 0) {
				if (err == -EINPROGRESS)
					tls_advance_record_sn(sk, prot,
							      &tls_ctx->rx);

				return err;
			}
		} else {
			*zc = false;
		}
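		/* For TLS 1.3, padding_length() scans the plaintext
		 * backwards for the first non-zero byte, which is the real
		 * content type; everything after it is padding.
		 */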
		pad = padding_length(ctx, prot, skb);
		if (pad < 0)
			return pad;

		rxm->full_len -= pad;
		rxm->offset += prot->prepend_size;
		rxm->full_len -= prot->overhead_size;
		tls_advance_record_sn(sk, prot, &tls_ctx->rx);
		ctx->decrypted = true;
		ctx->saved_data_ready(sk);
	} else {
		*zc = false;
	}

	return err;
}

int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	bool zc = true;
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
}
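
/* decrypt_skb() is the synchronous entry point used when the caller
 * already has a scatterlist destination (the device-offload re-encrypt
 * path in tls_device.c is the in-tree user). A minimal sketch of a call
 * site, assuming 'buf' is large enough for the AAD plus the full record
 * (names here are illustrative, not from this file):
 *
 *	struct scatterlist sg[1];
 *
 *	sg_init_table(sg, 1);
 *	sg_set_buf(&sg[0], buf, buf_len);
 *	err = decrypt_skb(sk, skb, sg);
 */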

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (skb) {
		struct strp_msg *rxm = strp_msg(skb);

		if (len < rxm->full_len) {
			rxm->offset += len;
			rxm->full_len -= len;
			return false;
		}
		consume_skb(skb);
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	__strp_unpause(&ctx->strp);

	return true;
}

/* Traverse the rx_list in the TLS receive context and copy each
 * decrypted record into the buffer provided by the caller, unless zero
 * copy is true. Further, a record is removed from the rx_list once it
 * has been consumed completely, in the non-peek case.
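 *
 * For example, with skip = 5 and queued records of 3 and 4 bytes, the
 * first record is stepped over entirely and copying starts at offset 2
 * of the second one.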
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   bool *cmsg,
			   size_t skip,
			   size_t len,
			   bool zc,
			   bool is_peek)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	u8 ctrl = *control;
	u8 msgc = *cmsg;
	struct tls_msg *tlm;
	ssize_t copied = 0;

	/* Set the record type in 'control' if caller didn't pass it */
	if (!ctrl && skb) {
		tlm = tls_msg(skb);
		ctrl = tlm->control;
	}

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		/* Cannot process a record of different type */
		if (ctrl != tlm->control)
			return 0;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		/* Cannot process a record of different type */
		if (ctrl != tlm->control)
			return 0;

		/* Set record type if not already done. For a non-data record,
		 * do not proceed if record type could not be copied.
		 */
		if (!msgc) {
			int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					    sizeof(ctrl), &ctrl);
			msgc = true;
			if (ctrl != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC)
					return -EIO;

				*cmsg = msgc;
			}
		}

		if (!zc || (rxm->full_len - skip) > len) {
			int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
							msg, chunk);
			if (err < 0)
				return err;
		}

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from the record in the non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in the 1st record in
		 * rx_list. So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}

	*control = ctrl;
	return copied;
}

int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct sk_psock *psock;
	unsigned char control = 0;
	ssize_t decrypted = 0;
	struct strp_msg *rxm;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	int num_async = 0;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	psock = sk_psock_get(sk);
	lock_sock(sk);

	/* Process pending decrypted records. They must be non-zero-copy. */
	err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
			      is_peek);
	if (err < 0) {
		tls_err_abort(sk, err);
		goto end;
	} else {
		copied = err;
	}

	if (len <= copied)
		goto recv_end;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	while (len && (decrypted + copied < target || ctx->recv_pkt)) {
		bool retain_skb = false;
		bool zc = false;
		int to_decrypt;
		int chunk = 0;
		bool async_capable;
		bool async = false;

		skb = tls_wait_data(sk, psock, flags, timeo, &err);
		if (!skb) {
			if (psock) {
				int ret = __tcp_bpf_recvmsg(sk, psock,
							    msg, len, flags);

				if (ret > 0) {
					decrypted += ret;
					len -= ret;
					continue;
				}
			}
			goto recv_end;
		} else {
			tlm = tls_msg(skb);
			if (prot->version == TLS_1_3_VERSION)
				tlm->control = 0;
			else
				tlm->control = ctx->control;
		}

		rxm = strp_msg(skb);

		to_decrypt = rxm->full_len - prot->overhead_size;
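
		/* Zero-copy decryption straight into the user buffer is
		 * only attempted when the whole record fits in the
		 * remaining iov, the iter maps user pages (not a kvec),
		 * the data will actually be consumed (not a peek), and
		 * the record is TLS 1.2 application data; for TLS 1.3 the
		 * true record type is only known after decryption.
		 */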
		if (to_decrypt <= len && !is_kvec && !is_peek &&
		    ctx->control == TLS_RECORD_TYPE_DATA &&
		    prot->version != TLS_1_3_VERSION)
			zc = true;

		/* Do not use async mode if record is non-data */
		if (ctx->control == TLS_RECORD_TYPE_DATA)
			async_capable = ctx->async_capable;
		else
			async_capable = false;

		err = decrypt_skb_update(sk, skb, &msg->msg_iter,
					 &chunk, &zc, async_capable);
		if (err < 0 && err != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			goto recv_end;
		}

		if (err == -EINPROGRESS) {
			async = true;
			num_async++;
		} else if (prot->version == TLS_1_3_VERSION) {
			tlm->control = ctx->control;
		}

		/* If the type of records being processed is not known yet,
		 * set it to the record type just dequeued. If it is already
		 * known, but does not match the record type just dequeued,
		 * go to end. We always get the record type here since for
		 * TLS 1.2 the record type is known right after the record
		 * is dequeued from the stream parser. For TLS 1.3, we
		 * disable async.
		 */
		if (!control)
			control = tlm->control;
		else if (control != tlm->control)
			goto recv_end;

		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(control), &control);
			cmsg = true;
			if (control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		}

		if (async)
			goto pick_next_record;

		if (!zc) {
			if (rxm->full_len > len) {
				retain_skb = true;
				chunk = len;
			} else {
				chunk = rxm->full_len;
			}

			err = skb_copy_datagram_msg(skb, rxm->offset,
						    msg, chunk);
			if (err < 0)
				goto recv_end;

			if (!is_peek) {
				rxm->offset = rxm->offset + chunk;
				rxm->full_len = rxm->full_len - chunk;
			}
		}

pick_next_record:
		if (chunk > len)
			chunk = len;

		decrypted += chunk;
		len -= chunk;

		/* For async or peek case, queue the current skb */
		if (async || is_peek || retain_skb) {
			skb_queue_tail(&ctx->rx_list, skb);
			skb = NULL;
		}

		if (tls_sw_advance_skb(sk, skb, chunk)) {
			/* Return full control message to
			 * userspace before trying to parse
			 * another message type
			 */
			msg->msg_flags |= MSG_EOR;
			if (ctx->control != TLS_RECORD_TYPE_DATA)
				goto recv_end;
		} else {
			break;
		}
	}

recv_end:
	if (num_async) {
		/* Wait for all previously submitted records to be decrypted */
		smp_store_mb(ctx->async_notify, true);
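		/* The store-with-barrier pairs with the async decrypt
		 * completion callback: either decrypt_pending is still
		 * non-zero and we wait below, or the callback has seen
		 * async_notify and completed async_wait.
		 */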
		if (atomic_read(&ctx->decrypt_pending)) {
			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
			if (err) {
				/* one of the async decrypts failed */
				tls_err_abort(sk, err);
				copied = 0;
				decrypted = 0;
				goto end;
			}
		} else {
			reinit_completion(&ctx->async_wait.completion);
		}
		WRITE_ONCE(ctx->async_notify, false);

		/* Drain records from the rx_list & copy if required */
		if (is_peek || is_kvec)
			err = process_rx_list(ctx, msg, &control, &cmsg, copied,
					      decrypted, false, is_peek);
		else
			err = process_rx_list(ctx, msg, &control, &cmsg, 0,
					      decrypted, true, is_peek);
		if (err < 0) {
			tls_err_abort(sk, err);
			copied = 0;
			goto end;
		}
	}

	copied += decrypted;

end:
	release_sock(sk);
	if (psock)
		sk_psock_put(sk, psock);
	return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc = false;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, NULL, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);

		/* splice does not support reading control messages */
		if (ctx->control != TLS_RECORD_TYPE_DATA) {
			err = -ENOTSUPP;
			goto splice_read_end;
		}

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

bool tls_sw_stream_read(const struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();

	return !ingress_empty || ctx->recv_pkt ||
	       !skb_queue_empty(&ctx->rx_list);
}

static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + prot->prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);

	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];
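
	/* Record length is the 16-bit big-endian field in header
	 * bytes 3-4; header[0] is the record type and header[1..2] the
	 * protocol version checked below.
	 */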
	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = prot->tag_size;
	if (prot->version != TLS_1_3_VERSION)
		cipher_overhead += prot->iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}

	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + rxm->offset);
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);
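	/* Parsing stays paused until this record is consumed and
	 * tls_sw_advance_skb() calls __strp_unpause(), so at most one
	 * record is pending decryption at a time.
	 */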

	ctx->saved_data_ready(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;

	strp_data_ready(&ctx->strp);

	psock = sk_psock_get(sk);
	if (psock && !list_empty(&psock->ingress_msg)) {
		ctx->saved_data_ready(sk);
		sk_psock_put(sk, psock);
	}
}

void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
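	/* BIT_TX_CLOSING makes a queued tx_work_handler() bail out early,
	 * and leaving BIT_TX_SCHEDULED set keeps tls_sw_write_space()
	 * from scheduling the work again.
	 */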
	cancel_delayed_work_sync(&ctx->tx_work.work);
}

void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;

	/* Wait for any pending async encryptions to complete */
	smp_store_mb(ctx->async_notify, true);
	if (atomic_read(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	tls_tx_records(sk, -1);

	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_free_partial_record(sk, tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}

void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	kfree(ctx);
}

void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}

void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_done(&ctx->strp);
}

void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(ctx);
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(tls_ctx);
}

/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;

	if (unlikely(!tls_ctx))
		return;

	ctx = tls_sw_ctx_tx(tls_ctx);
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;
	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	tls_tx_records(sk, -1);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}
2170
Boris Pismenny7463d3a2019-02-27 17:38:04 +02002171void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2172{
2173 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2174
2175 /* Schedule the transmission if tx list is ready */
Jakub Kicinski02b1fa02019-11-05 14:24:34 -08002176 if (is_tx_ready(tx_ctx) &&
2177 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2178 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
Boris Pismenny7463d3a2019-02-27 17:38:04 +02002179}
2180
Jakub Kicinski318892a2019-07-19 10:29:14 -07002181void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2182{
2183 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2184
2185 write_lock_bh(&sk->sk_callback_lock);
2186 rx_ctx->saved_data_ready = sk->sk_data_ready;
2187 sk->sk_data_ready = tls_data_ready;
2188 write_unlock_bh(&sk->sk_callback_lock);
2189
2190 strp_check_rcv(&rx_ctx->strp);
2191}

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
	struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
	struct crypto_tfm *tfm;
	char *iv, *rec_seq, *key, *salt, *cipher_name;
	size_t keysize;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
		sw_ctx_tx->tx_work.sk = sk;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		skb_queue_head_init(&sw_ctx_rx->rx_list);
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		key = gcm_128_info->key;
		salt = gcm_128_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
		gcm_256_info =
			(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		key = gcm_256_info->key;
		salt = gcm_256_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_CCM_128: {
		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
		ccm_128_info =
			(struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
		key = ccm_128_info->key;
		salt = ccm_128_info->salt;
		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
		cipher_name = "ccm(aes)";
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size +
			      prot->tag_size + prot->tail_size;
	prot->iv_size = iv_size;
	prot->salt_size = salt_size;
	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	/* Note: 128 & 256 bit salt are the same size */
	prot->rec_seq_size = rec_seq_size;
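	/* cctx->iv is laid out as salt || IV, matching the nonce
	 * construction in decrypt_internal().
	 */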
	memcpy(cctx->iv, salt, salt_size);
	memcpy(cctx->iv + salt_size, iv, iv_size);
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, key, keysize);

	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		if (crypto_info->version == TLS_1_3_VERSION)
			sw_ctx_rx->async_capable = false;
		else
			sw_ctx_rx->async_capable =
				tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;

		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}