/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE

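/* Decrypt a single record with the socket's receive AEAD transform. The
 * aead_request is allocated by the caller; this helper only fills in the
 * transform, AAD length, IV and scatterlists, then synchronously waits
 * for the crypto layer to finish.
 */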
static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + tls_ctx->rx.tag_size,
			       (u8 *)iv_recv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
	return ret;
}

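/* Shrink a scatterlist to target_size bytes, dropping the page references
 * and socket memory charges of any entries (or partial entry) beyond it.
 */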
static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}

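/* Grow the ciphertext/plaintext scatterlists to cover at least len bytes
 * of freshly allocated pages. -ENOSPC means the fixed-size scatterlist is
 * full; the element count is then pinned to its maximum so the callers
 * can fall back to pushing a shorter record.
 */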
static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 ctx->sg_encrypted_data, 0,
			 &ctx->sg_encrypted_num_elem,
			 &ctx->sg_encrypted_size, 0);

	if (rc == -ENOSPC)
		ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
			 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	if (rc == -ENOSPC)
		ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data);

	return rc;
}

static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}

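/* Run the send AEAD over the pending record: sg_aead_in chains the AAD
 * with the plaintext pages and sg_aead_out with the ciphertext pages. The
 * first ciphertext entry is temporarily advanced past the TLS header so
 * the tag and payload land after the prepend, then restored afterwards.
 */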
static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len)
{
	int rc;

	ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->tx.iv);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

	return rc;
}

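/* Seal and transmit the currently open record: build the AAD and the TLS
 * record header, encrypt the queued plaintext fragments, then hand the
 * ciphertext scatterlist to tls_push_sg() and advance the record sequence
 * number.
 */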
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct aead_request *req;
	int rc;

	req = aead_request_alloc(ctx->aead_send, sk->sk_allocation);
	if (!req)
		return -ENOMEM;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		goto out_req;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	tls_advance_record_sn(sk, &tls_ctx->tx);
out_req:
	aead_request_free(req);
	return rc;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

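/* Map up to 'length' bytes of the iterator directly into the scatterlist
 * 'to' by pinning the backing user pages, avoiding a copy. On failure the
 * iterator is reverted so the caller can fall back to a copying path.
 */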
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length, int *pages_used,
			      unsigned int *size_used,
			      struct scatterlist *to, int to_max_pages,
			      bool charge)
{
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = *size_used;
	int num_elem = *pages_used;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			if (charge)
				sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

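/* Copy 'bytes' from the iterator into the already allocated plaintext
 * scatterlist, starting at the first unfilled fragment of the open record
 * and advancing pending_open_record_frags as fragments are filled.
 */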
static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}

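/* sendmsg() for a TLS_SW socket: data is split into records of at most
 * TLS_MAX_PAYLOAD_SIZE bytes. When a full record (or end of the message)
 * can be pushed immediately, the user pages are mapped zero-copy into the
 * plaintext scatterlist; otherwise data is copied into allocated pages.
 */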
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;
	bool is_kvec = msg->msg_iter.type & ITER_KVEC;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}
		if (!is_kvec && (full_record || eor)) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
				try_to_copy, &ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				ctx->sg_plaintext_data,
				ARRAY_SIZE(ctx->sg_plaintext_data),
				true);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret)
				goto send_end;
			continue;

fallback_to_reg_send:
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->tx.overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}

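/* sendpage() for a TLS_SW socket: the caller's page is referenced directly
 * in the plaintext scatterlist, and a record is pushed once it is full, on
 * end-of-record, or when the scatterlist runs out of slots.
 */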
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}

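/* Wait until the strparser has queued a complete record on ctx->recv_pkt,
 * honouring MSG_DONTWAIT, the receive timeout, socket errors, shutdown and
 * pending signals.
 */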
static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

/* This function decrypts the input skb into either out_iov, out_sg, or the
 * skb's own buffers. The input parameter 'zc' indicates whether zero-copy
 * mode should be tried. With zero-copy mode, either out_iov or out_sg must
 * be non-NULL. If both out_iov and out_sg are NULL, the decryption happens
 * inside the skb buffers themselves, i.e. zero-copy gets disabled and 'zc'
 * is updated.
 */

static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *out_iov,
			    struct scatterlist *out_sg,
			    int *chunk, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
	struct aead_request *aead_req;
	struct sk_buff *unused;
	u8 *aad, *iv, *mem = NULL;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;

	if (*zc && (out_iov || out_sg)) {
		if (out_iov)
			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
		else
			n_sgout = sg_nents(out_sg);
	} else {
		n_sgout = 0;
		*zc = false;
	}

	n_sgin = skb_cow_data(skb, 0, &unused);
	if (n_sgin < 1)
		return -EBADMSG;

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	nsg = n_sgin + n_sgout;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
	mem_size = mem_size + TLS_AAD_SPACE_SIZE;
	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

	/* Allocate a single block of memory which contains
	 * aead_req || sgin[] || sgout[] || aad || iv.
	 * This order achieves correct alignment for aead_req, sgin, sgout.
	 */
	mem = kmalloc(mem_size, sk->sk_allocation);
	if (!mem)
		return -ENOMEM;

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	sgin = (struct scatterlist *)(mem + aead_size);
	sgout = sgin + n_sgin;
	aad = (u8 *)(sgout + n_sgout);
	iv = aad + TLS_AAD_SPACE_SIZE;

	/* Prepare IV */
	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			    tls_ctx->rx.iv_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}
	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);

	/* Prepare AAD */
	tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
		     tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
		     ctx->control);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + tls_ctx->rx.prepend_size,
			   rxm->full_len - tls_ctx->rx.prepend_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}

	if (n_sgout) {
		if (out_iov) {
			sg_init_table(sgout, n_sgout);
			sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);

			*chunk = 0;
			err = zerocopy_from_iter(sk, out_iov, data_len, &pages,
						 chunk, &sgout[1],
						 (n_sgout - 1), false);
			if (err < 0)
				goto fallback_to_reg_recv;
		} else if (out_sg) {
			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
		} else {
			goto fallback_to_reg_recv;
		}
	} else {
fallback_to_reg_recv:
		sgout = sgin;
		pages = 0;
		*chunk = 0;
		*zc = false;
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, iv, data_len, aead_req);

	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));

	kfree(mem);
	return err;
}

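/* Decrypt the queued record (unless the device offload already did) and
 * strip the TLS header and overhead from the strparser message, advancing
 * the receive record sequence number.
 */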
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct iov_iter *dest, int *chunk, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0;

#ifdef CONFIG_TLS_DEVICE
	err = tls_device_decrypted(sk, skb);
	if (err < 0)
		return err;
#endif
	if (!ctx->decrypted) {
		err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
		if (err < 0)
			return err;
	} else {
		*zc = false;
	}

	rxm->offset += tls_ctx->rx.prepend_size;
	rxm->full_len -= tls_ctx->rx.overhead_size;
	tls_advance_record_sn(sk, &tls_ctx->rx);
	ctx->decrypted = true;
	ctx->saved_data_ready(sk);

	return err;
}

int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	bool zc = true;
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
}

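/* Consume len bytes of the current record. Returns true when the record is
 * fully consumed, in which case the skb is freed and the strparser is
 * unpaused so it can parse the next record.
 */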
static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);

	if (len < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;

		return false;
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	kfree_skb(skb);
	__strp_unpause(&ctx->strp);

	return true;
}

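/* recvmsg() for a TLS_SW socket: the record type is reported through a
 * TLS_GET_RECORD_TYPE control message, and records are decrypted directly
 * into the user iovec when possible, otherwise copied out of the skb.
 */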
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	unsigned char control;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;
	bool is_kvec = msg->msg_iter.type & ITER_KVEC;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		bool zc = false;
		int chunk = 0;

		skb = tls_wait_data(sk, flags, timeo, &err);
		if (!skb)
			goto recv_end;

		rxm = strp_msg(skb);
		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(ctx->control), &ctx->control);
			cmsg = true;
			control = ctx->control;
			if (ctx->control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		} else if (control != ctx->control) {
			goto recv_end;
		}

		if (!ctx->decrypted) {
			int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;

			if (!is_kvec && to_copy <= len &&
			    likely(!(flags & MSG_PEEK)))
				zc = true;

			err = decrypt_skb_update(sk, skb, &msg->msg_iter,
						 &chunk, &zc);
			if (err < 0) {
				tls_err_abort(sk, EBADMSG);
				goto recv_end;
			}
			ctx->decrypted = true;
		}

		if (!zc) {
			chunk = min_t(unsigned int, rxm->full_len, len);
			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
						    chunk);
			if (err < 0)
				goto recv_end;
		}

		copied += chunk;
		len -= chunk;
		if (likely(!(flags & MSG_PEEK))) {
			u8 control = ctx->control;

			if (tls_sw_advance_skb(sk, skb, chunk)) {
				/* Return full control message to
				 * userspace before trying to parse
				 * another message type
				 */
				msg->msg_flags |= MSG_EOR;
				if (control != TLS_RECORD_TYPE_DATA)
					goto recv_end;
			}
		} else {
			/* MSG_PEEK right now cannot look beyond current skb
			 * from strparser, meaning we cannot advance skb here
			 * and thus unpause strparser since we'd lose the
			 * original one.
			 */
			break;
		}

		/* If we have a new message from strparser, continue now. */
		if (copied >= target && !ctx->recv_pkt)
			break;
	} while (len);

recv_end:
	release_sock(sk);
	return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc = false;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	/* splice does not support reading control messages */
	if (ctx->control != TLS_RECORD_TYPE_DATA) {
		err = -ENOTSUPP;
		goto splice_read_end;
	}

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

unsigned int tls_sw_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	unsigned int ret;
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	/* Grab POLLOUT and POLLHUP from the underlying socket */
	ret = ctx->sk_poll(file, sock, wait);

	/* Clear POLLIN bits, and set based on recv_pkt */
	ret &= ~(POLLIN | POLLRDNORM);
	if (ctx->recv_pkt)
		ret |= POLLIN | POLLRDNORM;

	return ret;
}

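/* strparser parse_msg callback: validate the TLS record header at the
 * start of the message and return the full record length, or 0 to wait
 * for more data.
 */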
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);

	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
		ret = -EINVAL;
		goto read_failure;
	}

#ifdef CONFIG_TLS_DEVICE
	handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
			     *(u64 *)tls_ctx->rx.rec_seq);
#endif
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	ctx->saved_data_ready(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_data_ready(&ctx->strp);
}

void tls_sw_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	crypto_free_aead(ctx->aead_send);
	tls_free_both_sg(sk);

	kfree(ctx);
}

void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_sw_release_resources_rx(sk);

	kfree(ctx);
}

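/* Set up software crypto state for one direction (tx != 0 for transmit):
 * allocate the per-direction context, derive the IV and record-sequence
 * buffers from the AES-GCM-128 parameters supplied by userspace, allocate
 * and key the AEAD transform, and on rx attach the strparser to the socket.
 */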
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the IV size for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	cctx->tag_size = tag_size;
	cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
	cctx->iv_size = iv_size;
	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			   GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	cctx->rec_seq_size = rec_seq_size;
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	if (sw_ctx_tx) {
		sg_init_table(sw_ctx_tx->sg_encrypted_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_encrypted_data));
		sg_init_table(sw_ctx_tx->sg_plaintext_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_plaintext_data));

		sg_init_table(sw_ctx_tx->sg_aead_in, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_in[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_in[1]);
		sg_chain(sw_ctx_tx->sg_aead_in, 2,
			 sw_ctx_tx->sg_plaintext_data);
		sg_init_table(sw_ctx_tx->sg_aead_out, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_out[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_out[1]);
		sg_chain(sw_ctx_tx->sg_aead_out, 2,
			 sw_ctx_tx->sg_encrypted_data);
	}

	if (!*aead) {
		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, gcm_128_info->key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);

		write_lock_bh(&sk->sk_callback_lock);
		sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
		sk->sk_data_ready = tls_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);

		sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;

		strp_check_rcv(&sw_ctx_rx->strp);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}