/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE

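/* Decrypt a single TLS record with the Rx AEAD transform. The caller
 * provides the in/out scatterlists, the record IV and an already
 * allocated aead_request; the call waits for the crypto layer to finish
 * before returning.
 */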
static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + tls_ctx->rx.tag_size,
			       (u8 *)iv_recv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
	return ret;
}

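/* Trim a scatterlist (and the matching socket memory charge) back down to
 * target_size, releasing the pages of any entries that are trimmed away
 * completely.
 */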
static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}

static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 ctx->sg_encrypted_data, 0,
			 &ctx->sg_encrypted_num_elem,
			 &ctx->sg_encrypted_size, 0);

	if (rc == -ENOSPC)
		ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
			 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	if (rc == -ENOSPC)
		ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data);

	return rc;
}

static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}

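/* Encrypt the current open record: run the Tx AEAD over sg_aead_in and
 * write the ciphertext into sg_encrypted_data, offset past the record
 * header space that tls_push_record() fills in separately.
 */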
static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len)
{
	int rc;

	ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->tx.iv);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

	return rc;
}

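/* Seal and transmit the pending open record: build the AAD and record
 * header, encrypt the plaintext scatterlist, hand the ciphertext to
 * tls_push_sg() and advance the Tx record sequence number.
 */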
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct aead_request *req;
	int rc;

	req = aead_request_alloc(ctx->aead_send, sk->sk_allocation);
	if (!req)
		return -ENOMEM;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		goto out_req;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	tls_advance_record_sn(sk, &tls_ctx->tx);
out_req:
	aead_request_free(req);
	return rc;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

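/* Map user pages from an iov_iter straight into the 'to' scatterlist so
 * the crypto layer can operate on them without an intermediate copy.
 * 'charge' controls whether the mapped bytes are accounted against the
 * socket's memory.
 */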
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length, int *pages_used,
			      unsigned int *size_used,
			      struct scatterlist *to, int to_max_pages,
			      bool charge)
{
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = *size_used;
	int num_elem = *pages_used;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			if (charge)
				sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}

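/* sendmsg() for a software TLS socket. Data is staged into the open
 * record, zero-copy from the user iov when the record will be pushed
 * right away, otherwise copied into pre-allocated plaintext pages, and
 * records are pushed out when full or when MSG_MORE is not set.
 */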
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;
	bool is_kvec = msg->msg_iter.type & ITER_KVEC;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}
		if (!is_kvec && (full_record || eor)) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
				try_to_copy, &ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				ctx->sg_plaintext_data,
				ARRAY_SIZE(ctx->sg_plaintext_data),
				true);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret)
				goto send_end;
			continue;

fallback_to_reg_send:
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->tx.overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}

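/* sendpage() for a software TLS socket: reference the caller's page
 * directly in the plaintext scatterlist and push a record once it is
 * full, once the scatterlist runs out of slots, or when no further data
 * is expected (eor).
 */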
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}

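/* Wait until the strparser has queued a complete record (ctx->recv_pkt),
 * honouring the socket error state, shutdown, non-blocking flags and the
 * receive timeout.
 */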
static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

/* This function decrypts the input skb into either out_iov, out_sg or
 * the skb buffers themselves. The input parameter 'zc' indicates whether
 * zero-copy mode should be tried. In zero-copy mode either out_iov or
 * out_sg must be non-NULL; if both are NULL the record is decrypted in
 * place inside the skb buffers, i.e. zero-copy is disabled and '*zc' is
 * cleared to reflect that.
 */

static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *out_iov,
			    struct scatterlist *out_sg,
			    int *chunk, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
	struct aead_request *aead_req;
	struct sk_buff *unused;
	u8 *aad, *iv, *mem = NULL;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;

	if (*zc && (out_iov || out_sg)) {
		if (out_iov)
			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
		else
			n_sgout = sg_nents(out_sg);
	} else {
		n_sgout = 0;
		*zc = false;
	}

	n_sgin = skb_cow_data(skb, 0, &unused);
	if (n_sgin < 1)
		return -EBADMSG;

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	nsg = n_sgin + n_sgout;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
	mem_size = mem_size + TLS_AAD_SPACE_SIZE;
	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

	/* Allocate a single block of memory which contains
	 * aead_req || sgin[] || sgout[] || aad || iv.
	 * This order achieves correct alignment for aead_req, sgin, sgout.
	 */
	mem = kmalloc(mem_size, sk->sk_allocation);
	if (!mem)
		return -ENOMEM;

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	sgin = (struct scatterlist *)(mem + aead_size);
	sgout = sgin + n_sgin;
	aad = (u8 *)(sgout + n_sgout);
	iv = aad + TLS_AAD_SPACE_SIZE;

	/* Prepare IV */
	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			    tls_ctx->rx.iv_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}
	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);

	/* Prepare AAD */
	tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
		     tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
		     ctx->control);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + tls_ctx->rx.prepend_size,
			   rxm->full_len - tls_ctx->rx.prepend_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}

	if (n_sgout) {
		if (out_iov) {
			sg_init_table(sgout, n_sgout);
			sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);

			*chunk = 0;
			err = zerocopy_from_iter(sk, out_iov, data_len, &pages,
						 chunk, &sgout[1],
						 (n_sgout - 1), false);
			if (err < 0)
				goto fallback_to_reg_recv;
		} else if (out_sg) {
			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
		} else {
			goto fallback_to_reg_recv;
		}
	} else {
fallback_to_reg_recv:
		sgout = sgin;
		pages = 0;
		*chunk = 0;
		*zc = false;
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, iv, data_len, aead_req);

	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));

	kfree(mem);
	return err;
}

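/* Decrypt the record at the head of the receive queue (unless device
 * offload or a previous pass already decrypted it) and, on success,
 * strip the record header from the strparser message and advance the Rx
 * record sequence number.
 */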
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct iov_iter *dest, int *chunk, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0;

#ifdef CONFIG_TLS_DEVICE
	err = tls_device_decrypted(sk, skb);
	if (err < 0)
		return err;
#endif
	if (!ctx->decrypted) {
		err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
		if (err < 0)
			return err;
	} else {
		*zc = false;
	}

	rxm->offset += tls_ctx->rx.prepend_size;
	rxm->full_len -= tls_ctx->rx.overhead_size;
	tls_advance_record_sn(sk, &tls_ctx->rx);
	ctx->decrypted = true;
	ctx->saved_data_ready(sk);

	return err;
}

int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	bool zc = true;
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);

	if (len < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;

		return false;
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	kfree_skb(skb);
	__strp_unpause(&ctx->strp);

	return true;
}

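/* recvmsg() for a software TLS socket. Records are decrypted either
 * directly into the user iov (zero-copy, when the whole record fits in
 * the remaining buffer and MSG_PEEK is not set) or in place and then
 * copied out. Control records are surfaced to userspace through a
 * TLS_GET_RECORD_TYPE cmsg.
 */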
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	unsigned char control;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;
	bool is_kvec = msg->msg_iter.type & ITER_KVEC;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		bool zc = false;
		int chunk = 0;

		skb = tls_wait_data(sk, flags, timeo, &err);
		if (!skb)
			goto recv_end;

		rxm = strp_msg(skb);
		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(ctx->control), &ctx->control);
			cmsg = true;
			control = ctx->control;
			if (ctx->control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		} else if (control != ctx->control) {
			goto recv_end;
		}

		if (!ctx->decrypted) {
			int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;

			if (!is_kvec && to_copy <= len &&
			    likely(!(flags & MSG_PEEK)))
				zc = true;

			err = decrypt_skb_update(sk, skb, &msg->msg_iter,
						 &chunk, &zc);
			if (err < 0) {
				tls_err_abort(sk, EBADMSG);
				goto recv_end;
			}
			ctx->decrypted = true;
		}

		if (!zc) {
			chunk = min_t(unsigned int, rxm->full_len, len);
			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
						    chunk);
			if (err < 0)
				goto recv_end;
		}

		copied += chunk;
		len -= chunk;
		if (likely(!(flags & MSG_PEEK))) {
			u8 control = ctx->control;

			if (tls_sw_advance_skb(sk, skb, chunk)) {
				/* Return full control message to
				 * userspace before trying to parse
				 * another message type
				 */
				msg->msg_flags |= MSG_EOR;
				if (control != TLS_RECORD_TYPE_DATA)
					goto recv_end;
			}
		}
		/* If we have a new message from strparser, continue now. */
		if (copied >= target && !ctx->recv_pkt)
			break;
	} while (len);

recv_end:
	release_sock(sk);
	return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc = false;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	/* splice does not support reading control messages */
	if (ctx->control != TLS_RECORD_TYPE_DATA) {
		err = -ENOTSUPP;
		goto splice_read_end;
	}

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

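/* poll() for a software TLS socket: readability is driven by whether the
 * strparser has queued a complete record (ctx->recv_pkt), not by the raw
 * TCP receive queue, so the POLLIN bits from the underlying socket are
 * overridden here.
 */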
unsigned int tls_sw_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	unsigned int ret;
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	/* Grab POLLOUT and POLLHUP from the underlying socket */
	ret = ctx->sk_poll(file, sock, wait);

	/* Clear POLLIN bits, and set based on recv_pkt */
	ret &= ~(POLLIN | POLLRDNORM);
	if (ctx->recv_pkt)
		ret |= POLLIN | POLLRDNORM;

	return ret;
}

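/* strparser callback: parse and validate the TLS record header at the
 * start of the message and return the full record length, 0 if more data
 * is needed, or a negative error which aborts the connection.
 */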
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);

	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
		ret = -EINVAL;
		goto read_failure;
	}

#ifdef CONFIG_TLS_DEVICE
	handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
			     *(u64 *)tls_ctx->rx.rec_seq);
#endif
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	ctx->saved_data_ready(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_data_ready(&ctx->strp);
}

void tls_sw_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	crypto_free_aead(ctx->aead_send);
	tls_free_both_sg(sk);

	kfree(ctx);
}

void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_sw_release_resources_rx(sk);

	kfree(ctx);
}

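/* Set up the software TLS state for one direction of a socket: allocate
 * the per-direction context, derive the IV and record-sequence state
 * from the crypto_info stored in the tls_context (AES-128-GCM only),
 * instantiate the AEAD transform, and on Rx hook up the strparser and
 * the data_ready/poll callbacks.
 */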
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		crypto_info = &ctx->crypto_send;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		crypto_info = &ctx->crypto_recv;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the IV size for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	cctx->tag_size = tag_size;
	cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
	cctx->iv_size = iv_size;
	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			   GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	cctx->rec_seq_size = rec_seq_size;
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	if (sw_ctx_tx) {
		sg_init_table(sw_ctx_tx->sg_encrypted_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_encrypted_data));
		sg_init_table(sw_ctx_tx->sg_plaintext_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_plaintext_data));

		sg_init_table(sw_ctx_tx->sg_aead_in, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_in[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_in[1]);
		sg_chain(sw_ctx_tx->sg_aead_in, 2,
			 sw_ctx_tx->sg_plaintext_data);
		sg_init_table(sw_ctx_tx->sg_aead_out, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_out[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_out[1]);
		sg_chain(sw_ctx_tx->sg_aead_out, 2,
			 sw_ctx_tx->sg_encrypted_data);
	}

	if (!*aead) {
		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	rc = crypto_aead_setkey(*aead, keyval,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);

		write_lock_bh(&sk->sk_callback_lock);
		sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
		sk->sk_data_ready = tls_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);

		sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;

		strp_check_rcv(&sw_ctx_rx->strp);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}