/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE

static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}
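
/* Illustrative note (assumptions, not guarantees from this file): for a
 * purely linear skb the walk above yields 1 element; a linear head plus
 * two page frags intersecting [offset, offset + len) yields 3. The
 * recursion_level cap of 24 bounds frag-list nesting the same way the
 * generic skb_to_sgvec() walk does.
 */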

static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct scatterlist *sgout = aead_req->dst;
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	pending = atomic_dec_return(&ctx->decrypt_pending);

	/* Propagate the error, if any */
	if (err) {
		ctx->async_wait.err = err;
		tls_err_abort(skb->sk, err);
	}

	/* After using skb->sk to propagate the socket through the crypto
	 * async callback, we need to NULL it again.
	 */
	skb->sk = NULL;

	/* Release the skb, pages and memory allocated for crypto req */
	kfree_skb(skb);

	/* Skip the first S/G entry as it points to AAD */
	for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
		if (!sg)
			break;
		put_page(sg_page(sg));
	}

	kfree(aead_req);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);
}

static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + tls_ctx->rx.tag_size,
			       (u8 *)iv_recv);

	if (async) {
		/* Using skb->sk to push sk through to crypto async callback
		 * handler. This allows propagating errors up to the socket
		 * if needed. It _must_ be cleared in the async handler
		 * before kfree_skb is called. We _know_ skb->sk is NULL
		 * because it is a clone from strparser.
		 */
		skb->sk = sk;
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, skb);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (async)
			return ret;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}

	if (async)
		atomic_dec(&ctx->decrypt_pending);

	return ret;
}
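
/* Sketch of the AEAD layout consumed above (assuming TLS 1.2 with
 * AES-GCM-128, the only cipher this file handles):
 *
 *   sgin : AAD (TLS_AAD_SPACE_SIZE = 13 bytes) | ciphertext | tag (16)
 *   sgout: AAD placeholder                     | plaintext
 *
 * so the cryptlen passed to the AEAD is data_len (plaintext) plus
 * rx.tag_size, and the 13 AAD bytes are skipped via set_ad().
 */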

static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	trim_sg(sk, rec->sg_plaintext_data,
		&rec->sg_plaintext_num_elem,
		&rec->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;

	trim_sg(sk, rec->sg_encrypted_data,
		&rec->sg_encrypted_num_elem,
		&rec->sg_encrypted_size,
		target_size);
}
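
/* Worked example for trim_sg() (illustrative): entries of length
 * [4096, 4096, 100] give *sg_size = 8292; trimming to target_size 4200
 * frees the 100-byte entry outright, shrinks the second entry to
 * 104 bytes, and leaves *sg_num_elem = 2.
 */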

static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 rec->sg_encrypted_data, 0,
			 &rec->sg_encrypted_num_elem,
			 &rec->sg_encrypted_size, 0);

	if (rc == -ENOSPC)
		rec->sg_encrypted_num_elem = ARRAY_SIZE(rec->sg_encrypted_data);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	int rc = 0;

	rc = sk_alloc_sg(sk, len, rec->sg_plaintext_data, 0,
			 &rec->sg_plaintext_num_elem, &rec->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	if (rc == -ENOSPC)
		rec->sg_plaintext_num_elem = ARRAY_SIZE(rec->sg_plaintext_data);

	return rc;
}

static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	/* Return if there is no open record */
	if (!rec)
		return;

	free_sg(sk, rec->sg_encrypted_data,
		&rec->sg_encrypted_num_elem,
		&rec->sg_encrypted_size);

	free_sg(sk, rec->sg_plaintext_data,
		&rec->sg_plaintext_num_elem,
		&rec->sg_plaintext_size);
}

int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			rc = tls_push_sg(sk, tls_ctx,
					 &rec->sg_encrypted_data[0],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	return rc;
}
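
/* Usage note (illustrative): flags == -1 means "reuse the MSG_* flags
 * stashed in each record when it was pushed"; the sendmsg()/sendpage()
 * paths pass their live flags instead so MSG_DONTWAIT and friends apply
 * to the transmission they trigger.
 */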

static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct sock *sk = req->data;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);

	rec->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
	rec->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

	free_sg(sk, rec->sg_plaintext_data,
		&rec->sg_plaintext_num_elem, &rec->sg_plaintext_size);

	/* Free the record if an error was previously set on the socket */
	if (err || sk->sk_err) {
		free_sg(sk, rec->sg_encrypted_data,
			&rec->sg_encrypted_num_elem, &rec->sg_encrypted_size);

		kfree(rec);
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}

static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len)
{
	struct tls_rec *rec = ctx->open_rec;
	int rc;

	rec->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
	rec->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, tls_ctx->tx.iv);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, sk);

	/* Add the record to tx_list */
	list_add_tail(&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		rec->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
		rec->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from the context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, &tls_ctx->tx);
	return rc;
}
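
/* Framing sketch (assuming TLS 1.2 AES-GCM-128): the first encrypted
 * entry was sized with tx.prepend_size (5-byte record header plus
 * 8-byte explicit nonce) of headroom, which tls_fill_prepend() fills in
 * before this function runs; the offset/length shuffle above aims the
 * AEAD at the ciphertext region only, then restores the entry if the
 * request completed synchronously or failed.
 */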

static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct aead_request *req;
	int rc;

	if (!rec)
		return 0;

	rec->tx_flags = flags;
	req = &rec->aead_req;

	sg_mark_end(rec->sg_plaintext_data + rec->sg_plaintext_num_elem - 1);
	sg_mark_end(rec->sg_encrypted_data + rec->sg_encrypted_num_elem - 1);

	tls_make_aad(rec->aad_space, rec->sg_plaintext_size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&rec->sg_encrypted_data[0])) +
			 rec->sg_encrypted_data[0].offset,
			 rec->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req, rec->sg_plaintext_size);
	if (rc == -EINPROGRESS)
		return -EINPROGRESS;

	free_sg(sk, rec->sg_plaintext_data, &rec->sg_plaintext_num_elem,
		&rec->sg_plaintext_size);

	if (rc < 0) {
		tls_err_abort(sk, EBADMSG);
		return rc;
	}

	return tls_tx_records(sk, flags);
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length, int *pages_used,
			      unsigned int *size_used,
			      struct scatterlist *to, int to_max_pages,
			      bool charge)
{
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = *size_used;
	int num_elem = *pages_used;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			if (charge)
				sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}
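
/* Note (illustrative): iov_iter_get_pages() takes a reference on every
 * user page it maps, so each sg entry built here owns a page ref that is
 * later dropped with put_page() - by free_sg() on the Tx path or by
 * tls_decrypt_done()/decrypt_internal() on the Rx path. On failure the
 * iterator is rewound so a copying fallback can re-read the same bytes.
 */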

static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct scatterlist *sg = rec->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < rec->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}

static struct tls_rec *get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec;
	int mem_size;

	/* Return if we already have an open record */
	if (ctx->open_rec)
		return ctx->open_rec;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	sg_init_table(&rec->sg_plaintext_data[0],
		      ARRAY_SIZE(rec->sg_plaintext_data));
	sg_init_table(&rec->sg_encrypted_data[0],
		      ARRAY_SIZE(rec->sg_encrypted_data));

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
		   sizeof(rec->aad_space));
	sg_unmark_end(&rec->sg_aead_in[1]);
	sg_chain(rec->sg_aead_in, 2, rec->sg_plaintext_data);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space,
		   sizeof(rec->aad_space));
	sg_unmark_end(&rec->sg_aead_out[1]);
	sg_chain(rec->sg_aead_out, 2, rec->sg_encrypted_data);

	ctx->open_rec = rec;

	return rec;
}
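
/* Layout sketch of the chained lists built above (illustrative):
 *
 *   sg_aead_in : [aad_space][chain] -> sg_plaintext_data[]
 *   sg_aead_out: [aad_space][chain] -> sg_encrypted_data[]
 *
 * The AEAD therefore sees the AAD immediately followed by the payload
 * without copying either list.
 */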

int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
	bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = msg->msg_iter.type & ITER_KVEC;
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	/* Wait for any pending writes on the socket to complete */
	if (unlikely(sk->sk_write_pending)) {
		ret = wait_on_pending_writer(sk, &timeo);
		if (unlikely(ret))
			goto send_end;
	}

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		rec = get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		orig_size = rec->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = rec->sg_plaintext_size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - rec->sg_encrypted_size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
				try_to_copy, &rec->sg_plaintext_num_elem,
				&rec->sg_plaintext_size,
				rec->sg_plaintext_data,
				ARRAY_SIZE(rec->sg_plaintext_data),
				true);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;

fallback_to_reg_send:
			trim_sg(sk, rec->sg_plaintext_data,
				&rec->sg_plaintext_num_elem,
				&rec->sg_plaintext_size,
				orig_size);
		}

		required_size = rec->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - rec->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, rec->sg_encrypted_data,
				&rec->sg_encrypted_num_elem,
				&rec->sg_encrypted_size,
				rec->sg_plaintext_size +
				tls_ctx->tx.overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret != -EAGAIN)
					goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (rec->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		smp_store_mb(ctx->async_notify, true);

		if (atomic_read(&ctx->encrypt_pending))
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}
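
/* Minimal userspace sketch of how this entry point is reached
 * (illustrative only; error handling and the TLS handshake are omitted,
 * and the key/iv/salt/rec_seq fields of ci must come from a completed
 * handshake):
 *
 *   struct tls12_crypto_info_aes_gcm_128 ci = {
 *           .info.version = TLS_1_2_VERSION,
 *           .info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *   };
 *   setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *   setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *   send(fd, buf, len, 0);    // dispatched to tls_sw_sendmsg()
 */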

int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	size_t orig_size = size;
	struct scatterlist *sg;
	struct tls_rec *rec;
	int num_async = 0;
	bool full_record;
	int record_room;
	bool eor;
	int ret;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Wait for any pending writes on the socket to complete */
	if (unlikely(sk->sk_write_pending)) {
		ret = wait_on_pending_writer(sk, &timeo);
		if (unlikely(ret))
			goto sendpage_end;
	}

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		rec = get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = rec->sg_plaintext_size + copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - rec->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = rec->sg_plaintext_data + rec->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		rec->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		rec->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = rec->sg_plaintext_num_elem;

		if (full_record || eor ||
		    rec->sg_plaintext_num_elem ==
		    ARRAY_SIZE(rec->sg_plaintext_data)) {
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret != -EAGAIN)
					goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, rec->sg_plaintext_size);
			goto sendpage_end;
		}

		goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}

static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

/* Decrypt the input skb into either out_iov or out_sg, or into the skb
 * buffers themselves. The input parameter 'zc' indicates whether
 * zero-copy mode should be tried. With zero-copy mode, either out_iov
 * or out_sg must be non-NULL. If both out_iov and out_sg are NULL, the
 * decryption happens inside the skb buffers themselves, i.e. zero-copy
 * gets disabled and 'zc' is updated.
 */

static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *out_iov,
			    struct scatterlist *out_sg,
			    int *chunk, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
	struct aead_request *aead_req;
	struct sk_buff *unused;
	u8 *aad, *iv, *mem = NULL;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;

	if (*zc && (out_iov || out_sg)) {
		if (out_iov)
			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
		else
			n_sgout = sg_nents(out_sg);
		n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size,
				 rxm->full_len - tls_ctx->rx.prepend_size);
	} else {
		n_sgout = 0;
		*zc = false;
		n_sgin = skb_cow_data(skb, 0, &unused);
	}

	if (n_sgin < 1)
		return -EBADMSG;

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	nsg = n_sgin + n_sgout;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
	mem_size = mem_size + TLS_AAD_SPACE_SIZE;
	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

	/* Allocate a single block of memory which contains
	 * aead_req || sgin[] || sgout[] || aad || iv.
	 * This order achieves correct alignment for aead_req, sgin, sgout.
	 */
	mem = kmalloc(mem_size, sk->sk_allocation);
	if (!mem)
		return -ENOMEM;

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	sgin = (struct scatterlist *)(mem + aead_size);
	sgout = sgin + n_sgin;
	aad = (u8 *)(sgout + n_sgout);
	iv = aad + TLS_AAD_SPACE_SIZE;

	/* Prepare IV */
	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			    tls_ctx->rx.iv_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}
	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);

	/* Prepare AAD */
	tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
		     tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
		     ctx->control);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + tls_ctx->rx.prepend_size,
			   rxm->full_len - tls_ctx->rx.prepend_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}

	if (n_sgout) {
		if (out_iov) {
			sg_init_table(sgout, n_sgout);
			sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);

			*chunk = 0;
			err = zerocopy_from_iter(sk, out_iov, data_len, &pages,
						 chunk, &sgout[1],
						 (n_sgout - 1), false);
			if (err < 0)
				goto fallback_to_reg_recv;
		} else if (out_sg) {
			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
		} else {
			goto fallback_to_reg_recv;
		}
	} else {
fallback_to_reg_recv:
		sgout = sgin;
		pages = 0;
		*chunk = 0;
		*zc = false;
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
				data_len, aead_req, *zc);
	if (err == -EINPROGRESS)
		return err;

	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));

	kfree(mem);
	return err;
}
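
/* Memory map of the single allocation above (illustrative):
 *
 *   mem -> | aead_req + req ctx | sgin[n_sgin] | sgout[n_sgout] |
 *          | aad (13 bytes) | iv (4-byte salt + 8-byte explicit nonce) |
 *
 * One kmalloc()/kfree() pair covers everything a record's decryption
 * needs, including the GCM nonce assembled from rx.iv and the record.
 */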

static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct iov_iter *dest, int *chunk, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0;

#ifdef CONFIG_TLS_DEVICE
	err = tls_device_decrypted(sk, skb);
	if (err < 0)
		return err;
#endif
	if (!ctx->decrypted) {
		err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
		if (err < 0) {
			if (err == -EINPROGRESS)
				tls_advance_record_sn(sk, &tls_ctx->rx);

			return err;
		}
	} else {
		*zc = false;
	}

	rxm->offset += tls_ctx->rx.prepend_size;
	rxm->full_len -= tls_ctx->rx.overhead_size;
	tls_advance_record_sn(sk, &tls_ctx->rx);
	ctx->decrypted = true;
	ctx->saved_data_ready(sk);

	return err;
}

int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	bool zc = true;
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (skb) {
		struct strp_msg *rxm = strp_msg(skb);

		if (len < rxm->full_len) {
			rxm->offset += len;
			rxm->full_len -= len;
			return false;
		}
		kfree_skb(skb);
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	__strp_unpause(&ctx->strp);

	return true;
}

int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	unsigned char control;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;
	bool is_kvec = msg->msg_iter.type & ITER_KVEC;
	int num_async = 0;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		bool zc = false;
		bool async = false;
		int chunk = 0;

		skb = tls_wait_data(sk, flags, timeo, &err);
		if (!skb)
			goto recv_end;

		rxm = strp_msg(skb);

		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(ctx->control), &ctx->control);
			cmsg = true;
			control = ctx->control;
			if (ctx->control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		} else if (control != ctx->control) {
			goto recv_end;
		}

		if (!ctx->decrypted) {
			int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;

			if (!is_kvec && to_copy <= len &&
			    likely(!(flags & MSG_PEEK)))
				zc = true;

			err = decrypt_skb_update(sk, skb, &msg->msg_iter,
						 &chunk, &zc);
			if (err < 0 && err != -EINPROGRESS) {
				tls_err_abort(sk, EBADMSG);
				goto recv_end;
			}

			if (err == -EINPROGRESS) {
				async = true;
				num_async++;
				goto pick_next_record;
			}

			ctx->decrypted = true;
		}

		if (!zc) {
			chunk = min_t(unsigned int, rxm->full_len, len);

			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
						    chunk);
			if (err < 0)
				goto recv_end;
		}

pick_next_record:
		copied += chunk;
		len -= chunk;
		if (likely(!(flags & MSG_PEEK))) {
			u8 control = ctx->control;

			/* For async, drop current skb reference */
			if (async)
				skb = NULL;

			if (tls_sw_advance_skb(sk, skb, chunk)) {
				/* Return full control message to
				 * userspace before trying to parse
				 * another message type
				 */
				msg->msg_flags |= MSG_EOR;
				if (control != TLS_RECORD_TYPE_DATA)
					goto recv_end;
			} else {
				break;
			}
		} else {
			/* MSG_PEEK right now cannot look beyond current skb
			 * from strparser, meaning we cannot advance skb here
			 * and thus unpause strparser since we'd lose the
			 * original one.
			 */
			break;
		}

		/* If we have a new message from strparser, continue now. */
		if (copied >= target && !ctx->recv_pkt)
			break;
	} while (len);

recv_end:
	if (num_async) {
		/* Wait for all previously submitted records to be decrypted */
		smp_store_mb(ctx->async_notify, true);
		if (atomic_read(&ctx->decrypt_pending)) {
			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
			if (err) {
				/* one of async decrypt failed */
				tls_err_abort(sk, err);
				copied = 0;
			}
		} else {
			reinit_completion(&ctx->async_wait.completion);
		}
		WRITE_ONCE(ctx->async_notify, false);
	}

	release_sock(sk);
	return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc = false;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	/* splice does not support reading control messages */
	if (ctx->control != TLS_RECORD_TYPE_DATA) {
		err = -ENOTSUPP;
		goto splice_read_end;
	}

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

unsigned int tls_sw_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	unsigned int ret;
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	/* Grab POLLOUT and POLLHUP from the underlying socket */
	ret = ctx->sk_poll(file, sock, wait);

	/* Clear POLLIN bits, and set based on recv_pkt */
	ret &= ~(POLLIN | POLLRDNORM);
	if (ctx->recv_pkt)
		ret |= POLLIN | POLLRDNORM;

	return ret;
}

static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);

	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
		ret = -EINVAL;
		goto read_failure;
	}

#ifdef CONFIG_TLS_DEVICE
	handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
			     *(u64 *)tls_ctx->rx.rec_seq);
#endif
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}
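
/* Header layout parsed above (TLS 1.2, illustrative):
 *
 *   header[0]    record type (0x17 = application data)
 *   header[1-2]  protocol version bytes
 *   header[3-4]  payload length, big endian
 *
 * e.g. a full 16 KB application-data record under AES-GCM-128 carries
 * data_len = 16384 + 8 (explicit nonce) + 16 (tag) = 16408 bytes.
 */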

static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	ctx->saved_data_ready(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_data_ready(&ctx->strp);
}
1533
void tls_sw_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;

	/* Wait for any pending async encryptions to complete */
	smp_store_mb(ctx->async_notify, true);
	if (atomic_read(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	cancel_delayed_work_sync(&ctx->tx_work.work);

	/* Tx whatever records we can transmit and abandon the rest */
	tls_tx_records(sk, -1);

	/* Free up unsent records in tx_list. First, free the partially
	 * sent record, if there is one, at the head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		struct scatterlist *sg = tls_ctx->partially_sent_record;

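		/* Drop the page references held by the partially sent
		 * record and return the charged memory to the socket.
		 */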
		while (1) {
			put_page(sg_page(sg));
			sk_mem_uncharge(sk, sg->length);

			if (sg_is_last(sg))
				break;
			sg++;
		}

		tls_ctx->partially_sent_record = NULL;

		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		free_sg(sk, rec->sg_encrypted_data,
			&rec->sg_encrypted_num_elem,
			&rec->sg_encrypted_size);

		list_del(&rec->list);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_both_sg(sk);

	kfree(ctx);
}

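/* Detach the RX path from the socket: drop any parked record, free the
 * AEAD and unhook the strparser. strp_done() is called with the socket
 * lock dropped because the parser's work itself takes that lock.
 */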
void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}
}

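/* Full RX teardown: release all RX resources, then free the context. */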
void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_sw_release_resources_rx(sk);

	kfree(ctx);
}

/* Work handler that transmits the encrypted records queued on tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;

	lock_sock(sk);
	tls_tx_records(sk, -1);
	release_sock(sk);
}

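/* Install the software TLS data path on a socket for one direction
 * (tx selects TX vs. RX): allocate the per-direction context, copy in
 * the key material handed down from setsockopt(), instantiate the
 * gcm(aes) AEAD transform and, for RX, splice a strparser into the
 * socket's data-ready path.
 *
 * A minimal userspace sketch of how this point is reached (assuming
 * the TLS handshake already ran on fd and crypto_info was filled from
 * the negotiated secrets; error handling omitted):
 *
 *	struct tls12_crypto_info_aes_gcm_128 crypto_info = { ... };
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &crypto_info, sizeof(crypto_info));
 */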
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
		sw_ctx_tx->tx_work.sk = sk;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the IV size for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	cctx->tag_size = tag_size;
	cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
	cctx->iv_size = iv_size;
	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			   GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
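	/* The AES-GCM-128 nonce is the 4-byte implicit salt followed by
	 * the 8-byte per-record explicit IV; keep them contiguous so the
	 * full nonce can be handed to the AEAD as one buffer.
	 */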
	memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	cctx->rec_seq_size = rec_seq_size;
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	if (!*aead) {
		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, gcm_128_info->key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);

		write_lock_bh(&sk->sk_callback_lock);
		sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
		sk->sk_data_ready = tls_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);

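		/* Remember the transport's poll op; tls_sw_poll() builds
		 * on its mask but only reports readability once a full
		 * record has been parsed.
		 */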
		sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;

		strp_check_rcv(&sw_ctx_rx->strp);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}