/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <crypto/aead.h>

#include <net/tls.h>

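/* Shrink @sg to @target_size bytes: drop (and unref/uncharge) whole
 * trailing pages, then shorten the last remaining entry. *sg_num_elem
 * and *sg_size are updated to match.
 */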
static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

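/* Shrink both per-record scatterlists back to @target_size bytes of
 * plaintext; the encrypted list keeps overhead_size extra bytes for
 * the record header and authentication tag.
 */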
static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}

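/* Grow @sg until it covers @len bytes, filling it from the socket's
 * page_frag allocator and charging the socket's send memory. A new
 * chunk is merged into an existing entry when it is contiguous and the
 * list already has more than @first_coalesce entries. Returns -ENOMEM
 * on memory pressure or -ENOSPC once MAX_SKB_FRAGS entries are in use;
 * *sg_size and *sg_num_elem always reflect what was actually
 * allocated.
 */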
static int alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size,
		    int first_coalesce)
{
	struct page_frag *pfrag;
	unsigned int size = *sg_size;
	int num_elem = *sg_num_elem, use = 0, rc = 0;
	struct scatterlist *sge;
	unsigned int orig_offset;

	len -= size;
	pfrag = sk_page_frag(sk);

	while (len > 0) {
		if (!sk_page_frag_refill(sk, pfrag)) {
			rc = -ENOMEM;
			goto out;
		}

		use = min_t(int, len, pfrag->size - pfrag->offset);

		if (!sk_wmem_schedule(sk, use)) {
			rc = -ENOMEM;
			goto out;
		}

		sk_mem_charge(sk, use);
		size += use;
		orig_offset = pfrag->offset;
		pfrag->offset += use;

		sge = sg + num_elem - 1;
		if (num_elem > first_coalesce && sg_page(sg) == pfrag->page &&
		    sg->offset + sg->length == orig_offset) {
			sg->length += use;
		} else {
			sge++;
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			++num_elem;
			if (num_elem == MAX_SKB_FRAGS) {
				rc = -ENOSPC;
				break;
			}
		}

		len -= use;
	}
	goto out;

out:
	*sg_size = size;
	*sg_num_elem = num_elem;
	return rc;
}

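/* Wrappers around alloc_sg() for the per-context encrypted and
 * plaintext lists. The plaintext variant passes
 * pending_open_record_frags as first_coalesce so that fragments
 * already committed to the open record are not extended in place.
 */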
static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc = 0;

	rc = alloc_sg(sk, len, ctx->sg_encrypted_data,
		      &ctx->sg_encrypted_num_elem, &ctx->sg_encrypted_size, 0);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc = 0;

	rc = alloc_sg(sk, len, ctx->sg_plaintext_data,
		      &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
		      tls_ctx->pending_open_record_frags);

	return rc;
}

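/* Drop the page references held by @sg and uncharge the bytes from the
 * socket's memory accounting, then reset the list to empty.
 */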
static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}

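/* Run the AEAD over the open record. sg_aead_in chains aad_space with
 * the plaintext pages and sg_aead_out chains it with the encrypted
 * pages; the first encrypted fragment is temporarily advanced past
 * prepend_size so the ciphertext lands after the record header. Waits
 * for completion even when the transform is asynchronous.
 */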
static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context *ctx, size_t data_len,
			     gfp_t flags)
{
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(ctx->aead_send);
	struct aead_request *aead_req;
	int rc;

	aead_req = kzalloc(req_size, flags);
	if (!aead_req)
		return -ENOMEM;

	ctx->sg_encrypted_data[0].offset += tls_ctx->prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->iv);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size;

	kfree(aead_req);
	return rc;
}

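/* Close and transmit the currently open record: mark the scatterlist
 * ends, build the AAD and the record header in the first encrypted
 * fragment, encrypt, then hand the ciphertext to the transmit path.
 * The record sequence number is advanced even if the push returns
 * -EAGAIN; any harder transmit error aborts the connection.
 */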
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->rec_seq, tls_ctx->rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
			       sk->sk_allocation);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		return rc;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk);

	tls_advance_record_sn(sk, tls_ctx);
	return rc;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

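/* Build the plaintext scatterlist directly from the user pages backing
 * @from (the zerocopy TX path): pin up to MAX_SKB_FRAGS pages at a
 * time and reference them from sg_plaintext_data without copying.
 * Returns -EFAULT when the list fills up or the pages cannot be
 * pinned.
 */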
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = ctx->sg_plaintext_size;
	int num_elem = ctx->sg_plaintext_num_elem;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = ARRAY_SIZE(ctx->sg_plaintext_data) - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&ctx->sg_plaintext_data[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&ctx->sg_plaintext_data[num_elem]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

out:
	ctx->sg_plaintext_size = size;
	ctx->sg_plaintext_num_elem = num_elem;
	return rc;
}

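/* Fallback copy path: copy @bytes from @from into the preallocated
 * plaintext fragments, whole fragments at a time, starting with the
 * first fragment not yet consumed by the open record, and advance
 * pending_open_record_frags past each fragment filled.
 */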
static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}

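/* sendmsg() for a TLS_SW socket: data accumulates in an open record of
 * at most TLS_MAX_PAYLOAD_SIZE bytes of plaintext. When the record
 * fills, or at end-of-record (no MSG_MORE), the user pages are pinned
 * and encrypted directly (zerocopy); if that fails, the iterator is
 * reverted and the data is copied into kernel-allocated pages instead.
 * Returns the number of bytes accepted, or an error if nothing was
 * copied.
 */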
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		if (full_record || eor) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
						 try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (!ret)
				continue;
			if (ret == -EAGAIN)
				goto send_end;

			copied -= try_to_copy;
fallback_to_reg_send:
			iov_iter_revert(&msg->msg_iter,
					ctx->sg_plaintext_size - orig_size);
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}

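/* sendpage() for a TLS_SW socket: the caller's page is referenced
 * directly as plaintext (no copy). A record is pushed when it is full,
 * at end-of-record, or when the plaintext scatterlist runs out of
 * slots.
 */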
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}

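/* Release the software TX state: the AEAD transform, any pages still
 * held in the scatterlists, and both context structures.
 */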
void tls_sw_free_tx_resources(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	if (ctx->aead_send)
		crypto_free_aead(ctx->aead_send);

	tls_free_both_sg(sk);

	kfree(ctx);
	kfree(tls_ctx);
}

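/* Enable software TLS TX on @sk: validate the crypto parameters from
 * userspace (only AES-128-GCM is supported), build the salt||IV and
 * record-sequence buffers, chain aad_space ahead of the plaintext and
 * encrypted scatterlists for the AEAD, and allocate and key the
 * "gcm(aes)" transform.
 */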
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
{
	char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context *sw_ctx;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (ctx->priv_ctx) {
		rc = -EEXIST;
		goto out;
	}

	sw_ctx = kzalloc(sizeof(*sw_ctx), GFP_KERNEL);
	if (!sw_ctx) {
		rc = -ENOMEM;
		goto out;
	}

	crypto_init_wait(&sw_ctx->async_wait);

	ctx->priv_ctx = (struct tls_offload_context *)sw_ctx;

	crypto_info = &ctx->crypto_send;
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	ctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	ctx->tag_size = tag_size;
	ctx->overhead_size = ctx->prepend_size + ctx->tag_size;
	ctx->iv_size = iv_size;
	ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			  GFP_KERNEL);
	if (!ctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	ctx->rec_seq_size = rec_seq_size;
	ctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!ctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(ctx->rec_seq, rec_seq, rec_seq_size);

	sg_init_table(sw_ctx->sg_encrypted_data,
		      ARRAY_SIZE(sw_ctx->sg_encrypted_data));
	sg_init_table(sw_ctx->sg_plaintext_data,
		      ARRAY_SIZE(sw_ctx->sg_plaintext_data));

	sg_init_table(sw_ctx->sg_aead_in, 2);
	sg_set_buf(&sw_ctx->sg_aead_in[0], sw_ctx->aad_space,
		   sizeof(sw_ctx->aad_space));
	sg_unmark_end(&sw_ctx->sg_aead_in[1]);
	sg_chain(sw_ctx->sg_aead_in, 2, sw_ctx->sg_plaintext_data);
	sg_init_table(sw_ctx->sg_aead_out, 2);
	sg_set_buf(&sw_ctx->sg_aead_out[0], sw_ctx->aad_space,
		   sizeof(sw_ctx->aad_space));
	sg_unmark_end(&sw_ctx->sg_aead_out[1]);
	sg_chain(sw_ctx->sg_aead_out, 2, sw_ctx->sg_encrypted_data);

	if (!sw_ctx->aead_send) {
		sw_ctx->aead_send = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(sw_ctx->aead_send)) {
			rc = PTR_ERR(sw_ctx->aead_send);
			sw_ctx->aead_send = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	rc = crypto_aead_setkey(sw_ctx->aead_send, keyval,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size);
	if (!rc)
		return 0;

free_aead:
	crypto_free_aead(sw_ctx->aead_send);
	sw_ctx->aead_send = NULL;
free_rec_seq:
	kfree(ctx->rec_seq);
	ctx->rec_seq = NULL;
free_iv:
	kfree(ctx->iv);
	ctx->iv = NULL;
free_priv:
	kfree(ctx->priv_ctx);
	ctx->priv_ctx = NULL;
out:
	return rc;
}