/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE

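/* Decrypt and authenticate one record with the RX AEAD transform.
 * @sgin carries the AAD followed by the ciphertext and @sgout receives
 * the plaintext; passing the same list for both decrypts in place.
 * The call blocks until the (possibly asynchronous) crypto request
 * completes.
 */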
static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct sk_buff *skb,
			     gfp_t flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct aead_request *aead_req;
	int ret;

	aead_req = aead_request_alloc(ctx->aead_recv, flags);
	if (!aead_req)
		return -ENOMEM;

	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + tls_ctx->rx.tag_size,
			       (u8 *)iv_recv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);

	aead_request_free(aead_req);
	return ret;
}

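/* Scatterlist bookkeeping for the transmit path: trim_sg() shrinks a
 * list to @target_size, returning the pages and socket memory charge
 * of any fully trimmed tail entries, and trim_both_sgl() applies it to
 * the plaintext and encrypted lists (the latter keeping room for the
 * record overhead). The alloc_*_sg() helpers below grow the lists via
 * sk_alloc_sg(), and free_sg() releases every element.
 */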
static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}

static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 ctx->sg_encrypted_data, 0,
			 &ctx->sg_encrypted_num_elem,
			 &ctx->sg_encrypted_size, 0);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
			 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	return rc;
}

static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}

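/* Encrypt the pending record. sg_aead_in chains the AAD with the
 * plaintext list and sg_aead_out chains it with the encrypted list;
 * the first encrypted entry is temporarily advanced past the record
 * header so the ciphertext lands right after the prepended TLS header.
 */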
static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len)
{
	int rc;

	ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->tx.iv);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

	return rc;
}

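/* Build and transmit one TLS record: generate the AAD, fill in the
 * record header in front of the encrypted buffer, encrypt the pending
 * plaintext and hand the result to tls_push_sg(). The record sequence
 * number is advanced once the record has been queued for transmit.
 */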
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct aead_request *req;
	int rc;

	req = aead_request_alloc(ctx->aead_send, sk->sk_allocation);
	if (!req)
		return -ENOMEM;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		goto out_req;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	tls_advance_record_sn(sk, &tls_ctx->tx);
out_req:
	aead_request_free(req);
	return rc;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

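/* Pin user pages from @from and map them straight into the @to
 * scatterlist so the AEAD can read or write user memory without an
 * intermediate copy. At most @to_max_pages entries are filled; @charge
 * accounts the mapped bytes to the socket, and @revert rewinds the
 * iterator after mapping so the caller can fall back to a copying
 * path.
 */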
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length, int *pages_used,
			      unsigned int *size_used,
			      struct scatterlist *to, int to_max_pages,
			      bool charge, bool revert)
{
	struct page *pages[MAX_SKB_FRAGS];
	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = *size_used;
	int num_elem = *pages_used;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			if (charge)
				sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

out:
	*size_used = size;
	*pages_used = num_elem;
	if (revert)
		iov_iter_revert(from, size);

	return rc;
}

static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}

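/* sendmsg() for a TLS_SW transmit socket. Data is gathered into at
 * most TLS_MAX_PAYLOAD_SIZE records, zero-copied from the user iov
 * when a full record (or end of message) is available and copied
 * through the plaintext scatterlist otherwise, then pushed out record
 * by record.
 */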
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		if (full_record || eor) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
				try_to_copy, &ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				ctx->sg_plaintext_data,
				ARRAY_SIZE(ctx->sg_plaintext_data),
				true, false);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (!ret)
				continue;
			if (ret < 0)
				goto send_end;

			copied -= try_to_copy;
fallback_to_reg_send:
			iov_iter_revert(&msg->msg_iter,
					ctx->sg_plaintext_size - orig_size);
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->tx.overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}

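/* sendpage() analog of tls_sw_sendmsg(): reference the caller's page
 * directly in the plaintext scatterlist and push a record whenever it
 * is full, the list runs out of slots, or no more data is expected.
 */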
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}

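/* Wait until the strparser has delivered a complete record, honouring
 * socket errors, shutdown, non-blocking mode and pending signals.
 */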
static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

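/* Decrypt a record and update its receive state: consult the device
 * offload path first (the NIC may already have decrypted the record),
 * fall through to software decryption otherwise, then strip the record
 * header from the message bounds and advance the RX record sequence
 * number. *@zc is cleared when decryption did not go into the caller's
 * buffers.
 */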
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct scatterlist *sgout, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0;

#ifdef CONFIG_TLS_DEVICE
	err = tls_device_decrypted(sk, skb);
	if (err < 0)
		return err;
#endif
	if (!ctx->decrypted) {
		err = decrypt_skb(sk, skb, sgout);
		if (err < 0)
			return err;
	} else {
		*zc = false;
	}

	rxm->offset += tls_ctx->rx.prepend_size;
	rxm->full_len -= tls_ctx->rx.overhead_size;
	tls_advance_record_sn(sk, &tls_ctx->rx);
	ctx->decrypted = true;
	ctx->saved_data_ready(sk);

	return err;
}

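/* Software decryption of one record. The IV is rebuilt from the salt
 * and the explicit nonce carried in the packet, the skb is mapped into
 * a scatterlist behind the AAD, and when no destination is supplied
 * the record is decrypted in place.
 */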
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE];
	struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
	struct scatterlist *sgin = &sgin_arr[0];
	struct strp_msg *rxm = strp_msg(skb);
	int ret, nsg = ARRAY_SIZE(sgin_arr);
	struct sk_buff *unused;

	ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			    tls_ctx->rx.iv_size);
	if (ret < 0)
		return ret;

	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	if (!sgout) {
		nsg = skb_cow_data(skb, 0, &unused) + 1;
		sgin = kmalloc_array(nsg, sizeof(*sgin), sk->sk_allocation);
		if (!sgin)
			return -ENOMEM;
		sgout = sgin;
	}

	sg_init_table(sgin, nsg);
	sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE);

	nsg = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + tls_ctx->rx.prepend_size,
			   rxm->full_len - tls_ctx->rx.prepend_size);
	if (nsg < 0) {
		ret = nsg;
		goto out;
	}

	tls_make_aad(ctx->rx_aad_ciphertext,
		     rxm->full_len - tls_ctx->rx.overhead_size,
		     tls_ctx->rx.rec_seq,
		     tls_ctx->rx.rec_seq_size,
		     ctx->control);

	ret = tls_do_decryption(sk, sgin, sgout, iv,
				rxm->full_len - tls_ctx->rx.overhead_size,
				skb, sk->sk_allocation);

out:
	if (sgin != &sgin_arr[0])
		kfree(sgin);

	return ret;
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);

	if (len < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;

		return false;
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	kfree_skb(skb);
	__strp_unpause(&ctx->strp);

	return true;
}

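/* recvmsg() for a TLS_SW receive socket. Each record's type is
 * surfaced as a TLS_GET_RECORD_TYPE control message; records are
 * decrypted directly into the user iov when it has room and MSG_PEEK
 * is not set, and copied out of the skb otherwise.
 */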
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	unsigned char control;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		bool zc = false;
		int chunk = 0;

		skb = tls_wait_data(sk, flags, timeo, &err);
		if (!skb)
			goto recv_end;

		rxm = strp_msg(skb);
		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(ctx->control), &ctx->control);
			cmsg = true;
			control = ctx->control;
			if (ctx->control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		} else if (control != ctx->control) {
			goto recv_end;
		}

		if (!ctx->decrypted) {
			int page_count;
			int to_copy;

			page_count = iov_iter_npages(&msg->msg_iter,
						     MAX_SKB_FRAGS);
			to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
			if (to_copy <= len && page_count < MAX_SKB_FRAGS &&
			    likely(!(flags & MSG_PEEK))) {
				struct scatterlist sgin[MAX_SKB_FRAGS + 1];
				int pages = 0;

				zc = true;
				sg_init_table(sgin, MAX_SKB_FRAGS + 1);
				sg_set_buf(&sgin[0], ctx->rx_aad_plaintext,
					   TLS_AAD_SPACE_SIZE);

				err = zerocopy_from_iter(sk, &msg->msg_iter,
							 to_copy, &pages,
							 &chunk, &sgin[1],
							 MAX_SKB_FRAGS,
							 false, true);
				if (err < 0)
					goto fallback_to_reg_recv;

				err = decrypt_skb_update(sk, skb, sgin, &zc);
				for (; pages > 0; pages--)
					put_page(sg_page(&sgin[pages]));
				if (err < 0) {
					tls_err_abort(sk, EBADMSG);
					goto recv_end;
				}
			} else {
fallback_to_reg_recv:
				err = decrypt_skb_update(sk, skb, NULL, &zc);
				if (err < 0) {
					tls_err_abort(sk, EBADMSG);
					goto recv_end;
				}
			}
			ctx->decrypted = true;
		}

		if (!zc) {
			chunk = min_t(unsigned int, rxm->full_len, len);
			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
						    chunk);
			if (err < 0)
				goto recv_end;
		}

		copied += chunk;
		len -= chunk;
		if (likely(!(flags & MSG_PEEK))) {
			u8 control = ctx->control;

			if (tls_sw_advance_skb(sk, skb, chunk)) {
				/* Return full control message to
				 * userspace before trying to parse
				 * another message type
				 */
				msg->msg_flags |= MSG_EOR;
				if (control != TLS_RECORD_TYPE_DATA)
					goto recv_end;
			}
		}
		/* If we have a new message from strparser, continue now. */
		if (copied >= target && !ctx->recv_pkt)
			break;
	} while (len);

recv_end:
	release_sock(sk);
	return copied ? : err;
}

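/* splice() path: decrypt in place and feed the plaintext to the pipe.
 * Only data records can be spliced.
 */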
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	/* splice does not support reading control messages */
	if (ctx->control != TLS_RECORD_TYPE_DATA) {
		err = -ENOTSUPP;
		goto splice_read_end;
	}

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &zc);

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

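/* poll() wrapper: reuse the transport's poll for write-side and hangup
 * state but report readability from recv_pkt, since bytes sitting in
 * the TCP receive queue are not readable until a full record has been
 * parsed.
 */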
unsigned int tls_sw_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	unsigned int ret;
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	/* Grab POLLOUT and POLLHUP from the underlying socket */
	ret = ctx->sk_poll(file, sock, wait);

	/* Clear POLLIN bits, and set based on recv_pkt */
	ret &= ~(POLLIN | POLLRDNORM);
	if (ctx->recv_pkt)
		ret |= POLLIN | POLLRDNORM;

	return ret;
}

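/* strparser parse_msg callback: validate the TLS record header and
 * return the full record length so the strparser aggregates exactly
 * one record per skb, 0 if more data is needed, or a negative error to
 * abort the connection.
 */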
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);

	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
		ret = -EINVAL;
		goto read_failure;
	}

#ifdef CONFIG_TLS_DEVICE
	handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
			     *(u64 *)tls_ctx->rx.rec_seq);
#endif
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	strp->sk->sk_state_change(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_data_ready(&ctx->strp);
}

void tls_sw_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	crypto_free_aead(ctx->aead_send);
	tls_free_both_sg(sk);

	kfree(ctx);
}

void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_sw_release_resources_rx(sk);

	kfree(ctx);
}

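/* Attach the software crypto state for one direction (@tx selects TX
 * or RX): allocate the per-direction context, derive the cipher
 * parameters from the setsockopt-provided key material, allocate and
 * key the AEAD transform, and, for RX, hook the strparser and
 * data-ready callbacks into the socket.
 */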
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		crypto_info = &ctx->crypto_send;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		crypto_info = &ctx->crypto_recv;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
			((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the IV size for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	cctx->tag_size = tag_size;
	cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
	cctx->iv_size = iv_size;
	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			   GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	cctx->rec_seq_size = rec_seq_size;
	cctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(cctx->rec_seq, rec_seq, rec_seq_size);

	if (sw_ctx_tx) {
		sg_init_table(sw_ctx_tx->sg_encrypted_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_encrypted_data));
		sg_init_table(sw_ctx_tx->sg_plaintext_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_plaintext_data));

		sg_init_table(sw_ctx_tx->sg_aead_in, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_in[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_in[1]);
		sg_chain(sw_ctx_tx->sg_aead_in, 2,
			 sw_ctx_tx->sg_plaintext_data);
		sg_init_table(sw_ctx_tx->sg_aead_out, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_out[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_out[1]);
		sg_chain(sw_ctx_tx->sg_aead_out, 2,
			 sw_ctx_tx->sg_encrypted_data);
	}

	if (!*aead) {
		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	rc = crypto_aead_setkey(*aead, keyval,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);

		write_lock_bh(&sk->sk_callback_lock);
		sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
		sk->sk_data_ready = tls_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);

		sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;

		strp_check_rcv(&sw_ctx_rx->strp);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}