/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE

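/* Decrypt one TLS record described by @sgin into @sgout with the
 * receive-side AEAD transform, blocking until the crypto layer signals
 * completion through ctx->async_wait.
 */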
static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct sk_buff *skb,
			     gfp_t flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct aead_request *aead_req;

	int ret;

	aead_req = aead_request_alloc(ctx->aead_recv, flags);
	if (!aead_req)
		return -ENOMEM;

	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + tls_ctx->rx.tag_size,
			       (u8 *)iv_recv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);

	aead_request_free(aead_req);
	return ret;
}

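/* Shrink the scatterlist to @target_size bytes, releasing whole pages
 * from the tail and uncharging the freed memory from the socket.
 */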
static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}

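/* Grow the encrypted-data scatterlist so it can hold @len bytes; the
 * pages are accounted against the socket's send buffer by sk_alloc_sg().
 */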
static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 ctx->sg_encrypted_data, 0,
			 &ctx->sg_encrypted_num_elem,
			 &ctx->sg_encrypted_size, 0);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
			 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	return rc;
}

static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}

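/* Run the send-side AEAD over the pending record.  The first
 * encrypted-data entry is temporarily advanced past the TLS header so
 * the ciphertext lands just after the prepend, then restored.
 */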
static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len)
{
	int rc;

	ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->tx.iv);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

	return rc;
}

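/* Seal the currently open record: build the AAD and TLS header,
 * encrypt the plaintext, hand the ciphertext pages to the transport
 * via tls_push_sg() and advance the record sequence number.
 */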
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct aead_request *req;
	int rc;

	req = aead_request_alloc(ctx->aead_send, sk->sk_allocation);
	if (!req)
		return -ENOMEM;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		goto out_req;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	tls_advance_record_sn(sk, &tls_ctx->tx);
out_req:
	aead_request_free(req);
	return rc;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

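/* Pin user pages from @from and map them directly into @to, avoiding a
 * copy.  On return *pages_used and *size_used reflect the scatterlist
 * entries and bytes consumed.
 */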
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length, int *pages_used,
			      unsigned int *size_used,
			      struct scatterlist *to, int to_max_pages,
			      bool charge)
{
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = *size_used;
	int num_elem = *pages_used;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			if (charge)
				sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

out:
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

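/* Copy @bytes from @from into the already-allocated plaintext
 * scatterlist, bumping pending_open_record_frags for each fragment
 * that is filled.
 */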
static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}

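/* sendmsg() for a TLS socket: accumulate user data into TLS records,
 * preferring the zerocopy path when a full record (or EOR) is
 * available, otherwise copying through the plaintext scatterlist.
 */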
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		if (full_record || eor) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
				try_to_copy, &ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				ctx->sg_plaintext_data,
				ARRAY_SIZE(ctx->sg_plaintext_data),
				true);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (!ret)
				continue;
			if (ret == -EAGAIN)
				goto send_end;

			copied -= try_to_copy;
fallback_to_reg_send:
			iov_iter_revert(&msg->msg_iter,
					ctx->sg_plaintext_size - orig_size);
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->tx.overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}

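/* sendpage() for a TLS socket: reference the caller's page directly in
 * the plaintext scatterlist and push a record once it is full, at EOR,
 * or when the scatterlist runs out of slots.
 */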
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}

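/* Wait until strparser has delivered a complete record (ctx->recv_pkt),
 * honouring the socket timeout, MSG_DONTWAIT and pending signals.
 */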
static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

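/* Decrypt the current record and, on success, strip the TLS header and
 * overhead from the strparser message bounds, advance the receive
 * sequence number and mark the record decrypted.
 */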
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct scatterlist *sgout)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0;

	err = decrypt_skb(sk, skb, sgout);
	if (err < 0)
		return err;

	rxm->offset += tls_ctx->rx.prepend_size;
	rxm->full_len -= tls_ctx->rx.overhead_size;
	tls_advance_record_sn(sk, &tls_ctx->rx);
	ctx->decrypted = true;
	ctx->saved_data_ready(sk);

	return err;
}

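/* Build the IV, AAD and input scatterlist for one record and run the
 * AEAD decryption.  If @sgout is NULL the record is decrypted in place
 * through a scatterlist allocated over the skb's pages.
 */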
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE];
	struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
	struct scatterlist *sgin = &sgin_arr[0];
	struct strp_msg *rxm = strp_msg(skb);
	int ret, nsg = ARRAY_SIZE(sgin_arr);
	struct sk_buff *unused;

	ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			    tls_ctx->rx.iv_size);
	if (ret < 0)
		return ret;

	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	if (!sgout) {
		nsg = skb_cow_data(skb, 0, &unused) + 1;
		sgin = kmalloc_array(nsg, sizeof(*sgin), sk->sk_allocation);
		if (!sgin)
			return -ENOMEM;
		sgout = sgin;
	}

	sg_init_table(sgin, nsg);
	sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE);

	nsg = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + tls_ctx->rx.prepend_size,
			   rxm->full_len - tls_ctx->rx.prepend_size);

	tls_make_aad(ctx->rx_aad_ciphertext,
		     rxm->full_len - tls_ctx->rx.overhead_size,
		     tls_ctx->rx.rec_seq,
		     tls_ctx->rx.rec_seq_size,
		     ctx->control);

	ret = tls_do_decryption(sk, sgin, sgout, iv,
				rxm->full_len - tls_ctx->rx.overhead_size,
				skb, sk->sk_allocation);

	if (sgin != &sgin_arr[0])
		kfree(sgin);

	return ret;
}

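/* Consume @len bytes of the current record.  Returns true when the
 * whole record has been eaten, in which case the skb is freed and
 * strparser is unpaused to parse the next record.
 */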
static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);

	if (len < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;

		return false;
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	kfree_skb(skb);
	__strp_unpause(&ctx->strp);

	return true;
}

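/* recvmsg() for a TLS socket: deliver the record type as a control
 * message, decrypt straight into the user's iovec when the record fits
 * (zerocopy), otherwise decrypt in place and copy out.
 */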
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	unsigned char control;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		bool zc = false;
		int chunk = 0;

		skb = tls_wait_data(sk, flags, timeo, &err);
		if (!skb)
			goto recv_end;

		rxm = strp_msg(skb);
		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(ctx->control), &ctx->control);
			cmsg = true;
			control = ctx->control;
			if (ctx->control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		} else if (control != ctx->control) {
			goto recv_end;
		}

		if (!ctx->decrypted) {
			int page_count;
			int to_copy;

			page_count = iov_iter_npages(&msg->msg_iter,
						     MAX_SKB_FRAGS);
			to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
			if (to_copy <= len && page_count < MAX_SKB_FRAGS &&
			    likely(!(flags & MSG_PEEK)))  {
				struct scatterlist sgin[MAX_SKB_FRAGS + 1];
				int pages = 0;

				zc = true;
				sg_init_table(sgin, MAX_SKB_FRAGS + 1);
				sg_set_buf(&sgin[0], ctx->rx_aad_plaintext,
					   TLS_AAD_SPACE_SIZE);

				err = zerocopy_from_iter(sk, &msg->msg_iter,
							 to_copy, &pages,
							 &chunk, &sgin[1],
							 MAX_SKB_FRAGS, false);
				if (err < 0)
					goto fallback_to_reg_recv;

				err = decrypt_skb_update(sk, skb, sgin);
				for (; pages > 0; pages--)
					put_page(sg_page(&sgin[pages]));
				if (err < 0) {
					tls_err_abort(sk, EBADMSG);
					goto recv_end;
				}
			} else {
fallback_to_reg_recv:
				err = decrypt_skb_update(sk, skb, NULL);
				if (err < 0) {
					tls_err_abort(sk, EBADMSG);
					goto recv_end;
				}
			}
			ctx->decrypted = true;
		}

		if (!zc) {
			chunk = min_t(unsigned int, rxm->full_len, len);
			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
						    chunk);
			if (err < 0)
				goto recv_end;
		}

		copied += chunk;
		len -= chunk;
		if (likely(!(flags & MSG_PEEK))) {
			u8 control = ctx->control;

			if (tls_sw_advance_skb(sk, skb, chunk)) {
				/* Return full control message to
				 * userspace before trying to parse
				 * another message type
				 */
				msg->msg_flags |= MSG_EOR;
				if (control != TLS_RECORD_TYPE_DATA)
					goto recv_end;
			}
		}
		/* If we have a new message from strparser, continue now. */
		if (copied >= target && !ctx->recv_pkt)
			break;
	} while (len);

recv_end:
	release_sock(sk);
	return copied ? : err;
}

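/* splice_read() for a TLS socket: decrypt the pending record in place
 * and splice its plaintext into @pipe.  Control records are rejected
 * since splice cannot convey the record type.
 */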
ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	/* splice does not support reading control messages */
	if (ctx->control != TLS_RECORD_TYPE_DATA) {
		err = -ENOTSUPP;
		goto splice_read_end;
	}

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL);

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

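/* poll() for a TLS socket: readability is determined by whether a
 * parsed record is queued (recv_pkt), not by the TCP receive queue.
 */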
unsigned int tls_sw_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	unsigned int ret;
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	/* Grab POLLOUT and POLLHUP from the underlying socket */
	ret = ctx->sk_poll(file, sock, wait);

	/* Clear POLLIN bits, and set based on recv_pkt */
	ret &= ~(POLLIN | POLLRDNORM);
	if (ctx->recv_pkt)
		ret |= POLLIN | POLLRDNORM;

	return ret;
}

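/* strparser parse_msg callback: validate the TLS record header and
 * report the full record length so strparser can aggregate the record.
 */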
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);

	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
		ret = -EINVAL;
		goto read_failure;
	}

	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	strp->sk->sk_state_change(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_data_ready(&ctx->strp);
}

void tls_sw_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	if (ctx->aead_send)
		crypto_free_aead(ctx->aead_send);
	tls_free_both_sg(sk);

	kfree(ctx);
}

void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (ctx->aead_recv) {
		if (ctx->recv_pkt) {
			kfree_skb(ctx->recv_pkt);
			ctx->recv_pkt = NULL;
		}
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_sw_release_resources_rx(sk);

	kfree(ctx);
}

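/* Set up software TLS for one direction of @sk: allocate the
 * per-direction context, derive IV and record-sequence state from the
 * userspace-provided crypto_info, instantiate the gcm(aes) AEAD and,
 * for RX, attach strparser and the data_ready hook.
 */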
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		crypto_info = &ctx->crypto_send;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		crypto_info = &ctx->crypto_recv;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the IV size for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	cctx->tag_size = tag_size;
	cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
	cctx->iv_size = iv_size;
	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			   GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	cctx->rec_seq_size = rec_seq_size;
	cctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(cctx->rec_seq, rec_seq, rec_seq_size);

	if (sw_ctx_tx) {
		sg_init_table(sw_ctx_tx->sg_encrypted_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_encrypted_data));
		sg_init_table(sw_ctx_tx->sg_plaintext_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_plaintext_data));

		sg_init_table(sw_ctx_tx->sg_aead_in, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_in[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_in[1]);
		sg_chain(sw_ctx_tx->sg_aead_in, 2,
			 sw_ctx_tx->sg_plaintext_data);
		sg_init_table(sw_ctx_tx->sg_aead_out, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_out[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_out[1]);
		sg_chain(sw_ctx_tx->sg_aead_out, 2,
			 sw_ctx_tx->sg_encrypted_data);
	}

	if (!*aead) {
		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	rc = crypto_aead_setkey(*aead, keyval,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);

		write_lock_bh(&sk->sk_callback_lock);
		sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
		sk->sk_data_ready = tls_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);

		sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;

		strp_check_rcv(&sw_ctx_rx->strp);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}