/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE

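/* Count the number of scatterlist entries needed to map the byte range
 * [offset, offset + len) of an skb, walking its linear data, page frags
 * and frag list in turn. Recursion into the frag list is bounded by
 * recursion_level.
 */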
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
                     unsigned int recursion_level)
{
        int start = skb_headlen(skb);
        int i, chunk = start - offset;
        struct sk_buff *frag_iter;
        int elt = 0;

        if (unlikely(recursion_level >= 24))
                return -EMSGSIZE;

        if (chunk > 0) {
                if (chunk > len)
                        chunk = len;
                elt++;
                len -= chunk;
                if (len == 0)
                        return elt;
                offset += chunk;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
                chunk = end - offset;
                if (chunk > 0) {
                        if (chunk > len)
                                chunk = len;
                        elt++;
                        len -= chunk;
                        if (len == 0)
                                return elt;
                        offset += chunk;
                }
                start = end;
        }

        if (unlikely(skb_has_frag_list(skb))) {
                skb_walk_frags(skb, frag_iter) {
                        int end, ret;

                        WARN_ON(start > offset + len);

                        end = start + frag_iter->len;
                        chunk = end - offset;
                        if (chunk > 0) {
                                if (chunk > len)
                                        chunk = len;
                                ret = __skb_nsg(frag_iter, offset - start, chunk,
                                                recursion_level + 1);
                                if (unlikely(ret < 0))
                                        return ret;
                                elt += ret;
                                len -= chunk;
                                if (len == 0)
                                        return elt;
                                offset += chunk;
                        }
                        start = end;
                }
        }
        BUG_ON(len);
        return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
        return __skb_nsg(skb, offset, len, 0);
}

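/* Completion callback for async decryption. Propagates any error to the
 * socket, clears and frees the skb that carried the socket pointer,
 * releases the page references and request memory, and wakes up a waiter
 * once the last pending decryption has finished.
 */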
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
        struct aead_request *aead_req = (struct aead_request *)req;
        struct scatterlist *sgout = aead_req->dst;
        struct tls_sw_context_rx *ctx;
        struct tls_context *tls_ctx;
        struct scatterlist *sg;
        struct sk_buff *skb;
        unsigned int pages;
        int pending;

        skb = (struct sk_buff *)req->data;
        tls_ctx = tls_get_ctx(skb->sk);
        ctx = tls_sw_ctx_rx(tls_ctx);
        pending = atomic_dec_return(&ctx->decrypt_pending);

        /* Propagate if there was an err */
        if (err) {
                ctx->async_wait.err = err;
                tls_err_abort(skb->sk, err);
        }

        /* After using skb->sk to propagate sk through crypto async callback
         * we need to NULL it again.
         */
        skb->sk = NULL;

        /* Release the skb, pages and memory allocated for crypto req */
        kfree_skb(skb);

        /* Skip the first S/G entry as it points to AAD */
        for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
                if (!sg)
                        break;
                put_page(sg_page(sg));
        }

        kfree(aead_req);

        if (!pending && READ_ONCE(ctx->async_notify))
                complete(&ctx->async_wait.completion);
}

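/* Set up and submit the AEAD decrypt request for a single record. In
 * async mode the request completes in tls_decrypt_done(); otherwise the
 * function waits for the crypto layer to finish before returning.
 */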
static int tls_do_decryption(struct sock *sk,
                             struct sk_buff *skb,
                             struct scatterlist *sgin,
                             struct scatterlist *sgout,
                             char *iv_recv,
                             size_t data_len,
                             struct aead_request *aead_req,
                             bool async)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        int ret;

        aead_request_set_tfm(aead_req, ctx->aead_recv);
        aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
        aead_request_set_crypt(aead_req, sgin, sgout,
                               data_len + tls_ctx->rx.tag_size,
                               (u8 *)iv_recv);

        if (async) {
                /* Using skb->sk to push sk through to crypto async callback
                 * handler. This allows propagating errors up to the socket
                 * if needed. It _must_ be cleared in the async handler
                 * before kfree_skb is called. We _know_ skb->sk is NULL
                 * because it is a clone from strparser.
                 */
                skb->sk = sk;
                aead_request_set_callback(aead_req,
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          tls_decrypt_done, skb);
                atomic_inc(&ctx->decrypt_pending);
        } else {
                aead_request_set_callback(aead_req,
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &ctx->async_wait);
        }

        ret = crypto_aead_decrypt(aead_req);
        if (ret == -EINPROGRESS) {
                if (async)
                        return ret;

                ret = crypto_wait_req(ret, &ctx->async_wait);
        }

        if (async)
                atomic_dec(&ctx->decrypt_pending);

        return ret;
}

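/* Shrink an sg list in place to target_size bytes, uncharging the socket
 * memory and dropping the page references of everything trimmed from the
 * tail.
 */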
static void trim_sg(struct sock *sk, struct scatterlist *sg,
                    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
        int i = *sg_num_elem - 1;
        int trim = *sg_size - target_size;

        if (trim <= 0) {
                WARN_ON(trim < 0);
                return;
        }

        *sg_size = target_size;
        while (trim >= sg[i].length) {
                trim -= sg[i].length;
                sk_mem_uncharge(sk, sg[i].length);
                put_page(sg_page(&sg[i]));
                i--;

                if (i < 0)
                        goto out;
        }

        sg[i].length -= trim;
        sk_mem_uncharge(sk, trim);

out:
        *sg_num_elem = i + 1;
}

static void trim_both_sgl(struct sock *sk, int target_size)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;

        trim_sg(sk, &rec->sg_plaintext_data[1],
                &rec->sg_plaintext_num_elem,
                &rec->sg_plaintext_size,
                target_size);

        if (target_size > 0)
                target_size += tls_ctx->tx.overhead_size;

        trim_sg(sk, &rec->sg_encrypted_data[1],
                &rec->sg_encrypted_num_elem,
                &rec->sg_encrypted_size,
                target_size);
}

static int alloc_encrypted_sg(struct sock *sk, int len)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
        int rc = 0;

        rc = sk_alloc_sg(sk, len,
                         &rec->sg_encrypted_data[1], 0,
                         &rec->sg_encrypted_num_elem,
                         &rec->sg_encrypted_size, 0);

        if (rc == -ENOSPC)
                rec->sg_encrypted_num_elem =
                        ARRAY_SIZE(rec->sg_encrypted_data) - 1;

        return rc;
}

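/* Grow the plaintext sg list up to required_size bytes by taking extra
 * references on the pages already mapped in sg_encrypted_data, so that
 * plaintext and ciphertext describe the same pages at matching offsets.
 */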
static int move_to_plaintext_sg(struct sock *sk, int required_size)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
        struct scatterlist *plain_sg = &rec->sg_plaintext_data[1];
        struct scatterlist *enc_sg = &rec->sg_encrypted_data[1];
        int enc_sg_idx = 0;
        int skip, len;

        if (rec->sg_plaintext_num_elem == MAX_SKB_FRAGS)
                return -ENOSPC;

        /* We add page references worth len bytes from enc_sg at the
         * end of plain_sg. The caller guarantees that sg_encrypted_data
         * has enough room.
         */
        len = required_size - rec->sg_plaintext_size;

        /* Skip initial bytes in sg_encrypted_data to be able
         * to use the same offset for both plain and encrypted data.
         */
        skip = tls_ctx->tx.prepend_size + rec->sg_plaintext_size;

        while (enc_sg_idx < rec->sg_encrypted_num_elem) {
                if (enc_sg[enc_sg_idx].length > skip)
                        break;

                skip -= enc_sg[enc_sg_idx].length;
                enc_sg_idx++;
        }

        /* Unmark the end of plain_sg */
        sg_unmark_end(plain_sg + rec->sg_plaintext_num_elem - 1);

        while (len) {
                struct page *page = sg_page(&enc_sg[enc_sg_idx]);
                int bytes = enc_sg[enc_sg_idx].length - skip;
                int offset = enc_sg[enc_sg_idx].offset + skip;

                if (bytes > len)
                        bytes = len;
                else
                        enc_sg_idx++;

                /* Skipping is required only the first time */
                skip = 0;

                /* Increment page reference */
                get_page(page);

                sg_set_page(&plain_sg[rec->sg_plaintext_num_elem], page,
                            bytes, offset);

                sk_mem_charge(sk, bytes);

                len -= bytes;
                rec->sg_plaintext_size += bytes;

                rec->sg_plaintext_num_elem++;

                if (rec->sg_plaintext_num_elem == MAX_SKB_FRAGS)
                        return -ENOSPC;
        }

        return 0;
}

static void free_sg(struct sock *sk, struct scatterlist *sg,
                    int *sg_num_elem, unsigned int *sg_size)
{
        int i, n = *sg_num_elem;

        for (i = 0; i < n; ++i) {
                sk_mem_uncharge(sk, sg[i].length);
                put_page(sg_page(&sg[i]));
        }
        *sg_num_elem = 0;
        *sg_size = 0;
}

static void tls_free_open_rec(struct sock *sk)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;

        /* Return if there is no open record */
        if (!rec)
                return;

        free_sg(sk, &rec->sg_encrypted_data[1],
                &rec->sg_encrypted_num_elem,
                &rec->sg_encrypted_size);

        free_sg(sk, &rec->sg_plaintext_data[1],
                &rec->sg_plaintext_num_elem,
                &rec->sg_plaintext_size);

        kfree(rec);
}

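/* Transmit completed records: first finish any partially sent record at
 * the head of tx_list, then push every record whose encryption has
 * completed (tx_ready), stopping at the first record still in flight.
 */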
int tls_tx_records(struct sock *sk, int flags)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec, *tmp;
        int tx_flags, rc = 0;

        if (tls_is_partially_sent_record(tls_ctx)) {
                rec = list_first_entry(&ctx->tx_list,
                                       struct tls_rec, list);

                if (flags == -1)
                        tx_flags = rec->tx_flags;
                else
                        tx_flags = flags;

                rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
                if (rc)
                        goto tx_err;

                /* Full record has been transmitted.
                 * Remove the head of tx_list.
                 */
                list_del(&rec->list);
                free_sg(sk, &rec->sg_plaintext_data[1],
                        &rec->sg_plaintext_num_elem, &rec->sg_plaintext_size);

                kfree(rec);
        }

        /* Tx all ready records */
        list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
                if (READ_ONCE(rec->tx_ready)) {
                        if (flags == -1)
                                tx_flags = rec->tx_flags;
                        else
                                tx_flags = flags;

                        rc = tls_push_sg(sk, tls_ctx,
                                         &rec->sg_encrypted_data[1],
                                         0, tx_flags);
                        if (rc)
                                goto tx_err;

                        list_del(&rec->list);
                        free_sg(sk, &rec->sg_plaintext_data[1],
                                &rec->sg_plaintext_num_elem,
                                &rec->sg_plaintext_size);

                        kfree(rec);
                } else {
                        break;
                }
        }

tx_err:
        if (rc < 0 && rc != -EAGAIN)
                tls_err_abort(sk, EBADMSG);

        return rc;
}

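/* Completion callback for async encryption. Restores the record prepend
 * space hidden from the cipher, records any error on the socket, marks
 * the record ready for transmission and schedules the tx work if the
 * record is at the head of tx_list.
 */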
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
        struct aead_request *aead_req = (struct aead_request *)req;
        struct sock *sk = req->data;
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec;
        bool ready = false;
        int pending;

        rec = container_of(aead_req, struct tls_rec, aead_req);

        rec->sg_encrypted_data[1].offset -= tls_ctx->tx.prepend_size;
        rec->sg_encrypted_data[1].length += tls_ctx->tx.prepend_size;

        /* Check if an error was previously set on the socket */
        if (err || sk->sk_err) {
                rec = NULL;

                /* If err is already set on socket, return the same code */
                if (sk->sk_err) {
                        ctx->async_wait.err = sk->sk_err;
                } else {
                        ctx->async_wait.err = err;
                        tls_err_abort(sk, err);
                }
        }

        if (rec) {
                struct tls_rec *first_rec;

                /* Mark the record as ready for transmission */
                smp_store_mb(rec->tx_ready, true);

                /* If received record is at head of tx_list, schedule tx */
                first_rec = list_first_entry(&ctx->tx_list,
                                             struct tls_rec, list);
                if (rec == first_rec)
                        ready = true;
        }

        pending = atomic_dec_return(&ctx->encrypt_pending);

        if (!pending && READ_ONCE(ctx->async_notify))
                complete(&ctx->async_wait.completion);

        if (!ready)
                return;

        /* Schedule the transmission */
        if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
                schedule_delayed_work(&ctx->tx_work.work, 2);
}

static int tls_do_encryption(struct sock *sk,
                             struct tls_context *tls_ctx,
                             struct tls_sw_context_tx *ctx,
                             struct aead_request *aead_req,
                             size_t data_len)
{
        struct tls_rec *rec = ctx->open_rec;
        struct scatterlist *plain_sg = rec->sg_plaintext_data;
        struct scatterlist *enc_sg = rec->sg_encrypted_data;
        int rc;

        /* Skip the first index as it contains AAD data */
        rec->sg_encrypted_data[1].offset += tls_ctx->tx.prepend_size;
        rec->sg_encrypted_data[1].length -= tls_ctx->tx.prepend_size;

        /* For in-place crypto, pass the same sg list as both src and dst */
        if (rec->inplace_crypto)
                plain_sg = enc_sg;

        aead_request_set_tfm(aead_req, ctx->aead_send);
        aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
        aead_request_set_crypt(aead_req, plain_sg, enc_sg,
                               data_len, tls_ctx->tx.iv);

        aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  tls_encrypt_done, sk);

        /* Add the record in tx_list */
        list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
        atomic_inc(&ctx->encrypt_pending);

        rc = crypto_aead_encrypt(aead_req);
        if (!rc || rc != -EINPROGRESS) {
                atomic_dec(&ctx->encrypt_pending);
                rec->sg_encrypted_data[1].offset -= tls_ctx->tx.prepend_size;
                rec->sg_encrypted_data[1].length += tls_ctx->tx.prepend_size;
        }

        if (!rc) {
                WRITE_ONCE(rec->tx_ready, true);
        } else if (rc != -EINPROGRESS) {
                list_del(&rec->list);
                return rc;
        }

        /* Unhook the record from context if encryption did not fail */
        ctx->open_rec = NULL;
        tls_advance_record_sn(sk, &tls_ctx->tx);
        return rc;
}

static int tls_push_record(struct sock *sk, int flags,
                           unsigned char record_type)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
        struct aead_request *req;
        int rc;

        if (!rec)
                return 0;

        rec->tx_flags = flags;
        req = &rec->aead_req;

        sg_mark_end(rec->sg_plaintext_data + rec->sg_plaintext_num_elem);
        sg_mark_end(rec->sg_encrypted_data + rec->sg_encrypted_num_elem);

        tls_make_aad(rec->aad_space, rec->sg_plaintext_size,
                     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
                     record_type);

        tls_fill_prepend(tls_ctx,
                         page_address(sg_page(&rec->sg_encrypted_data[1])) +
                         rec->sg_encrypted_data[1].offset,
                         rec->sg_plaintext_size, record_type);

        tls_ctx->pending_open_record_frags = 0;

        rc = tls_do_encryption(sk, tls_ctx, ctx, req, rec->sg_plaintext_size);
        if (rc == -EINPROGRESS)
                return -EINPROGRESS;

        if (rc < 0) {
                tls_err_abort(sk, EBADMSG);
                return rc;
        }

        return tls_tx_records(sk, flags);
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
        return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

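/* Map up to length bytes from the iov directly into the sg list 'to' by
 * pinning user pages, avoiding a data copy. On failure the iterator is
 * reverted so the caller can fall back to copying.
 */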
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
                              int length, int *pages_used,
                              unsigned int *size_used,
                              struct scatterlist *to, int to_max_pages,
                              bool charge)
{
        struct page *pages[MAX_SKB_FRAGS];

        size_t offset;
        ssize_t copied, use;
        int i = 0;
        unsigned int size = *size_used;
        int num_elem = *pages_used;
        int rc = 0;
        int maxpages;

        while (length > 0) {
                i = 0;
                maxpages = to_max_pages - num_elem;
                if (maxpages == 0) {
                        rc = -EFAULT;
                        goto out;
                }
                copied = iov_iter_get_pages(from, pages,
                                            length,
                                            maxpages, &offset);
                if (copied <= 0) {
                        rc = -EFAULT;
                        goto out;
                }

                iov_iter_advance(from, copied);

                length -= copied;
                size += copied;
                while (copied) {
                        use = min_t(int, copied, PAGE_SIZE - offset);

                        sg_set_page(&to[num_elem],
                                    pages[i], use, offset);
                        sg_unmark_end(&to[num_elem]);
                        if (charge)
                                sk_mem_charge(sk, use);

                        offset = 0;
                        copied -= use;

                        ++i;
                        ++num_elem;
                }
        }

        /* Mark the end in the last sg entry if newly added */
        if (num_elem > *pages_used)
                sg_mark_end(&to[num_elem - 1]);
out:
        if (rc)
                iov_iter_revert(from, size - *size_used);
        *size_used = size;
        *pages_used = num_elem;

        return rc;
}

static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
                             int bytes)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
        struct scatterlist *sg = &rec->sg_plaintext_data[1];
        int copy, i, rc = 0;

        for (i = tls_ctx->pending_open_record_frags;
             i < rec->sg_plaintext_num_elem; ++i) {
                copy = sg[i].length;
                if (copy_from_iter(
                                page_address(sg_page(&sg[i])) + sg[i].offset,
                                copy, from) != copy) {
                        rc = -EFAULT;
                        goto out;
                }
                bytes -= copy;

                ++tls_ctx->pending_open_record_frags;

                if (!bytes)
                        break;
        }

out:
        return rc;
}

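/* Copy bytes from the iov into the open record's plaintext pages,
 * starting at the first fragment not yet filled, advancing
 * pending_open_record_frags for each fragment completed.
 */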
static struct tls_rec *get_rec(struct sock *sk)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec;
        int mem_size;

        /* Return if we already have an open record */
        if (ctx->open_rec)
                return ctx->open_rec;

        mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

        rec = kzalloc(mem_size, sk->sk_allocation);
        if (!rec)
                return NULL;

        sg_init_table(&rec->sg_plaintext_data[0],
                      ARRAY_SIZE(rec->sg_plaintext_data));
        sg_init_table(&rec->sg_encrypted_data[0],
                      ARRAY_SIZE(rec->sg_encrypted_data));

        sg_set_buf(&rec->sg_plaintext_data[0], rec->aad_space,
                   sizeof(rec->aad_space));
        sg_set_buf(&rec->sg_encrypted_data[0], rec->aad_space,
                   sizeof(rec->aad_space));

        ctx->open_rec = rec;
        rec->inplace_crypto = 1;

        return rec;
}

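/* Allocate a new record (or return the already open one). Entry 0 of
 * both sg lists is reserved for the AAD; a fresh record starts out in
 * in-place crypto mode until plaintext gets mapped separately.
 */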
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
        long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
        bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
        unsigned char record_type = TLS_RECORD_TYPE_DATA;
        bool is_kvec = msg->msg_iter.type & ITER_KVEC;
        bool eor = !(msg->msg_flags & MSG_MORE);
        size_t try_to_copy, copied = 0;
        struct tls_rec *rec;
        int required_size;
        int num_async = 0;
        bool full_record;
        int record_room;
        int num_zc = 0;
        int orig_size;
        int ret = 0;

        if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
                return -ENOTSUPP;

        lock_sock(sk);

        /* Wait till any pending write on the socket completes */
        if (unlikely(sk->sk_write_pending)) {
                ret = wait_on_pending_writer(sk, &timeo);
                if (unlikely(ret))
                        goto send_end;
        }

        if (unlikely(msg->msg_controllen)) {
                ret = tls_proccess_cmsg(sk, msg, &record_type);
                if (ret) {
                        if (ret == -EINPROGRESS)
                                num_async++;
                        else if (ret != -EAGAIN)
                                goto send_end;
                }
        }

        while (msg_data_left(msg)) {
                if (sk->sk_err) {
                        ret = -sk->sk_err;
                        goto send_end;
                }

                rec = get_rec(sk);
                if (!rec) {
                        ret = -ENOMEM;
                        goto send_end;
                }

                orig_size = rec->sg_plaintext_size;
                full_record = false;
                try_to_copy = msg_data_left(msg);
                record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size;
                if (try_to_copy >= record_room) {
                        try_to_copy = record_room;
                        full_record = true;
                }

                required_size = rec->sg_plaintext_size + try_to_copy +
                                tls_ctx->tx.overhead_size;

                if (!sk_stream_memory_free(sk))
                        goto wait_for_sndbuf;

alloc_encrypted:
                ret = alloc_encrypted_sg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto wait_for_memory;

                        /* Adjust try_to_copy according to the amount that was
                         * actually allocated. The difference is due
                         * to max sg elements limit
                         */
                        try_to_copy -= required_size - rec->sg_encrypted_size;
                        full_record = true;
                }

                if (!is_kvec && (full_record || eor) && !async_capable) {
                        ret = zerocopy_from_iter(sk, &msg->msg_iter,
                                try_to_copy, &rec->sg_plaintext_num_elem,
                                &rec->sg_plaintext_size,
                                &rec->sg_plaintext_data[1],
                                ARRAY_SIZE(rec->sg_plaintext_data) - 1,
                                true);
                        if (ret)
                                goto fallback_to_reg_send;

                        rec->inplace_crypto = 0;

                        num_zc++;
                        copied += try_to_copy;
                        ret = tls_push_record(sk, msg->msg_flags, record_type);
                        if (ret) {
                                if (ret == -EINPROGRESS)
                                        num_async++;
                                else if (ret != -EAGAIN)
                                        goto send_end;
                        }
                        continue;

fallback_to_reg_send:
                        trim_sg(sk, &rec->sg_plaintext_data[1],
                                &rec->sg_plaintext_num_elem,
                                &rec->sg_plaintext_size,
                                orig_size);
                }

                required_size = rec->sg_plaintext_size + try_to_copy;

                ret = move_to_plaintext_sg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto send_end;

                        /* Adjust try_to_copy according to the amount that was
                         * actually allocated. The difference is due
                         * to max sg elements limit
                         */
                        try_to_copy -= required_size - rec->sg_plaintext_size;
                        full_record = true;

                        trim_sg(sk, &rec->sg_encrypted_data[1],
                                &rec->sg_encrypted_num_elem,
                                &rec->sg_encrypted_size,
                                rec->sg_plaintext_size +
                                tls_ctx->tx.overhead_size);
                }

                ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
                if (ret)
                        goto trim_sgl;

                copied += try_to_copy;
                if (full_record || eor) {
                        ret = tls_push_record(sk, msg->msg_flags, record_type);
                        if (ret) {
                                if (ret == -EINPROGRESS)
                                        num_async++;
                                else if (ret != -EAGAIN)
                                        goto send_end;
                        }
                }

                continue;

wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                ret = sk_stream_wait_memory(sk, &timeo);
                if (ret) {
trim_sgl:
                        trim_both_sgl(sk, orig_size);
                        goto send_end;
                }

                if (rec->sg_encrypted_size < required_size)
                        goto alloc_encrypted;
        }

        if (!num_async) {
                goto send_end;
        } else if (num_zc) {
                /* Wait for pending encryptions to get completed */
                smp_store_mb(ctx->async_notify, true);

                if (atomic_read(&ctx->encrypt_pending))
                        crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
                else
                        reinit_completion(&ctx->async_wait.completion);

                WRITE_ONCE(ctx->async_notify, false);

                if (ctx->async_wait.err) {
                        ret = ctx->async_wait.err;
                        copied = 0;
                }
        }

        /* Transmit if any encryptions have completed */
        if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
                cancel_delayed_work(&ctx->tx_work.work);
                tls_tx_records(sk, msg->msg_flags);
        }

send_end:
        ret = sk_stream_error(sk, msg->msg_flags, ret);

        release_sock(sk);
        return copied ? copied : ret;
}

int tls_sw_sendpage(struct sock *sk, struct page *page,
                    int offset, size_t size, int flags)
{
        long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        unsigned char record_type = TLS_RECORD_TYPE_DATA;
        size_t orig_size = size;
        struct scatterlist *sg;
        struct tls_rec *rec;
        int num_async = 0;
        bool full_record;
        int record_room;
        int ret = 0;
        bool eor;

        if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
                      MSG_SENDPAGE_NOTLAST))
                return -ENOTSUPP;

        /* No MSG_EOR from splice, only look at MSG_MORE */
        eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

        lock_sock(sk);

        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        /* Wait till any pending write on the socket completes */
        if (unlikely(sk->sk_write_pending)) {
                ret = wait_on_pending_writer(sk, &timeo);
                if (unlikely(ret))
                        goto sendpage_end;
        }

        /* Call the sk_stream functions to manage the sndbuf mem. */
        while (size > 0) {
                size_t copy, required_size;

                if (sk->sk_err) {
                        ret = -sk->sk_err;
                        goto sendpage_end;
                }

                rec = get_rec(sk);
                if (!rec) {
                        ret = -ENOMEM;
                        goto sendpage_end;
                }

                full_record = false;
                record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size;
                copy = size;
                if (copy >= record_room) {
                        copy = record_room;
                        full_record = true;
                }
                required_size = rec->sg_plaintext_size + copy +
                                tls_ctx->tx.overhead_size;

                if (!sk_stream_memory_free(sk))
                        goto wait_for_sndbuf;
alloc_payload:
                ret = alloc_encrypted_sg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto wait_for_memory;

                        /* Adjust copy according to the amount that was
                         * actually allocated. The difference is due
                         * to max sg elements limit
                         */
                        copy -= required_size - rec->sg_plaintext_size;
                        full_record = true;
                }

                get_page(page);
                sg = &rec->sg_plaintext_data[1] + rec->sg_plaintext_num_elem;
                sg_set_page(sg, page, copy, offset);
                sg_unmark_end(sg);

                rec->sg_plaintext_num_elem++;

                sk_mem_charge(sk, copy);
                offset += copy;
                size -= copy;
                rec->sg_plaintext_size += copy;
                tls_ctx->pending_open_record_frags = rec->sg_plaintext_num_elem;

                if (full_record || eor ||
                    rec->sg_plaintext_num_elem ==
                    ARRAY_SIZE(rec->sg_plaintext_data) - 1) {
                        rec->inplace_crypto = 0;
                        ret = tls_push_record(sk, flags, record_type);
                        if (ret) {
                                if (ret == -EINPROGRESS)
                                        num_async++;
                                else if (ret != -EAGAIN)
                                        goto sendpage_end;
                        }
                }
                continue;
wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                ret = sk_stream_wait_memory(sk, &timeo);
                if (ret) {
                        trim_both_sgl(sk, rec->sg_plaintext_size);
                        goto sendpage_end;
                }

                goto alloc_payload;
        }

        if (num_async) {
                /* Transmit if any encryptions have completed */
                if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
                        cancel_delayed_work(&ctx->tx_work.work);
                        tls_tx_records(sk, flags);
                }
        }
sendpage_end:
        if (orig_size > size)
                ret = orig_size - size;
        else
                ret = sk_stream_error(sk, flags, ret);

        release_sock(sk);
        return ret;
}

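/* Wait until strparser has delivered a complete record (ctx->recv_pkt),
 * honouring socket errors, shutdown, non-blocking flags and the receive
 * timeout.
 */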
static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
                                     long timeo, int *err)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct sk_buff *skb;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        while (!(skb = ctx->recv_pkt)) {
                if (sk->sk_err) {
                        *err = sock_error(sk);
                        return NULL;
                }

                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return NULL;

                if (sock_flag(sk, SOCK_DONE))
                        return NULL;

                if ((flags & MSG_DONTWAIT) || !timeo) {
                        *err = -EAGAIN;
                        return NULL;
                }

                add_wait_queue(sk_sleep(sk), &wait);
                sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
                sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                remove_wait_queue(sk_sleep(sk), &wait);

                /* Handle signals */
                if (signal_pending(current)) {
                        *err = sock_intr_errno(timeo);
                        return NULL;
                }
        }

        return skb;
}

/* This function decrypts the input skb into either out_iov or out_sg,
 * or into the skb's own buffers. The input parameter 'zc' indicates if
 * zero-copy mode needs to be tried or not. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
 * NULL, then the decryption happens inside skb buffers itself, i.e.
 * zero-copy gets disabled and 'zc' is updated.
 */
static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
                            struct iov_iter *out_iov,
                            struct scatterlist *out_sg,
                            int *chunk, bool *zc)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct strp_msg *rxm = strp_msg(skb);
        int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
        struct aead_request *aead_req;
        struct sk_buff *unused;
        u8 *aad, *iv, *mem = NULL;
        struct scatterlist *sgin = NULL;
        struct scatterlist *sgout = NULL;
        const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;

        if (*zc && (out_iov || out_sg)) {
                if (out_iov)
                        n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
                else
                        n_sgout = sg_nents(out_sg);
                n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size,
                                 rxm->full_len - tls_ctx->rx.prepend_size);
        } else {
                n_sgout = 0;
                *zc = false;
                n_sgin = skb_cow_data(skb, 0, &unused);
        }

        if (n_sgin < 1)
                return -EBADMSG;

        /* Increment to accommodate AAD */
        n_sgin = n_sgin + 1;

        nsg = n_sgin + n_sgout;

        aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
        mem_size = aead_size + (nsg * sizeof(struct scatterlist));
        mem_size = mem_size + TLS_AAD_SPACE_SIZE;
        mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

        /* Allocate a single block of memory which contains
         * aead_req || sgin[] || sgout[] || aad || iv.
         * This order achieves correct alignment for aead_req, sgin, sgout.
         */
        mem = kmalloc(mem_size, sk->sk_allocation);
        if (!mem)
                return -ENOMEM;

        /* Segment the allocated memory */
        aead_req = (struct aead_request *)mem;
        sgin = (struct scatterlist *)(mem + aead_size);
        sgout = sgin + n_sgin;
        aad = (u8 *)(sgout + n_sgout);
        iv = aad + TLS_AAD_SPACE_SIZE;

        /* Prepare IV */
        err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
                            iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
                            tls_ctx->rx.iv_size);
        if (err < 0) {
                kfree(mem);
                return err;
        }
        memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);

        /* Prepare AAD */
        tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
                     tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
                     ctx->control);

        /* Prepare sgin */
        sg_init_table(sgin, n_sgin);
        sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
        err = skb_to_sgvec(skb, &sgin[1],
                           rxm->offset + tls_ctx->rx.prepend_size,
                           rxm->full_len - tls_ctx->rx.prepend_size);
        if (err < 0) {
                kfree(mem);
                return err;
        }

        if (n_sgout) {
                if (out_iov) {
                        sg_init_table(sgout, n_sgout);
                        sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);

                        *chunk = 0;
                        err = zerocopy_from_iter(sk, out_iov, data_len, &pages,
                                                 chunk, &sgout[1],
                                                 (n_sgout - 1), false);
                        if (err < 0)
                                goto fallback_to_reg_recv;
                } else if (out_sg) {
                        memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
                } else {
                        goto fallback_to_reg_recv;
                }
        } else {
fallback_to_reg_recv:
                sgout = sgin;
                pages = 0;
                *chunk = 0;
                *zc = false;
        }

        /* Prepare and submit AEAD request */
        err = tls_do_decryption(sk, skb, sgin, sgout, iv,
                                data_len, aead_req, *zc);
        if (err == -EINPROGRESS)
                return err;

        /* Release the pages in case iov was mapped to pages */
        for (; pages > 0; pages--)
                put_page(sg_page(&sgout[pages]));

        kfree(mem);
        return err;
}

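/* Decrypt the current record unless the device already did it, then
 * advance the receive sequence number and strip the TLS header and
 * overhead from the strparser message.
 */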
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
                              struct iov_iter *dest, int *chunk, bool *zc)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct strp_msg *rxm = strp_msg(skb);
        int err = 0;

#ifdef CONFIG_TLS_DEVICE
        err = tls_device_decrypted(sk, skb);
        if (err < 0)
                return err;
#endif
        if (!ctx->decrypted) {
                err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
                if (err < 0) {
                        if (err == -EINPROGRESS)
                                tls_advance_record_sn(sk, &tls_ctx->rx);

                        return err;
                }
        } else {
                *zc = false;
        }

        rxm->offset += tls_ctx->rx.prepend_size;
        rxm->full_len -= tls_ctx->rx.overhead_size;
        tls_advance_record_sn(sk, &tls_ctx->rx);
        ctx->decrypted = true;
        ctx->saved_data_ready(sk);

        return err;
}

int decrypt_skb(struct sock *sk, struct sk_buff *skb,
                struct scatterlist *sgout)
{
        bool zc = true;
        int chunk;

        return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
}

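/* Consume len bytes of the current record. Returns true and unpauses
 * strparser when the whole record has been eaten; returns false if part
 * of the record is left for a later read.
 */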
static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
                               unsigned int len)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

        if (skb) {
                struct strp_msg *rxm = strp_msg(skb);

                if (len < rxm->full_len) {
                        rxm->offset += len;
                        rxm->full_len -= len;
                        return false;
                }
                kfree_skb(skb);
        }

        /* Finished with message */
        ctx->recv_pkt = NULL;
        __strp_unpause(&ctx->strp);

        return true;
}

int tls_sw_recvmsg(struct sock *sk,
                   struct msghdr *msg,
                   size_t len,
                   int nonblock,
                   int flags,
                   int *addr_len)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        unsigned char control;
        struct strp_msg *rxm;
        struct sk_buff *skb;
        ssize_t copied = 0;
        bool cmsg = false;
        int target, err = 0;
        long timeo;
        bool is_kvec = msg->msg_iter.type & ITER_KVEC;
        int num_async = 0;

        flags |= nonblock;

        if (unlikely(flags & MSG_ERRQUEUE))
                return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

        lock_sock(sk);

        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
        do {
                bool zc = false;
                bool async = false;
                int chunk = 0;

                skb = tls_wait_data(sk, flags, timeo, &err);
                if (!skb)
                        goto recv_end;

                rxm = strp_msg(skb);

                if (!cmsg) {
                        int cerr;

                        cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
                                        sizeof(ctx->control), &ctx->control);
                        cmsg = true;
                        control = ctx->control;
                        if (ctx->control != TLS_RECORD_TYPE_DATA) {
                                if (cerr || msg->msg_flags & MSG_CTRUNC) {
                                        err = -EIO;
                                        goto recv_end;
                                }
                        }
                } else if (control != ctx->control) {
                        goto recv_end;
                }

                if (!ctx->decrypted) {
                        int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;

                        if (!is_kvec && to_copy <= len &&
                            likely(!(flags & MSG_PEEK)))
                                zc = true;

                        err = decrypt_skb_update(sk, skb, &msg->msg_iter,
                                                 &chunk, &zc);
                        if (err < 0 && err != -EINPROGRESS) {
                                tls_err_abort(sk, EBADMSG);
                                goto recv_end;
                        }

                        if (err == -EINPROGRESS) {
                                async = true;
                                num_async++;
                                goto pick_next_record;
                        }

                        ctx->decrypted = true;
                }

                if (!zc) {
                        chunk = min_t(unsigned int, rxm->full_len, len);

                        err = skb_copy_datagram_msg(skb, rxm->offset, msg,
                                                    chunk);
                        if (err < 0)
                                goto recv_end;
                }

pick_next_record:
                copied += chunk;
                len -= chunk;
                if (likely(!(flags & MSG_PEEK))) {
                        u8 control = ctx->control;

                        /* For async, drop current skb reference */
                        if (async)
                                skb = NULL;

                        if (tls_sw_advance_skb(sk, skb, chunk)) {
                                /* Return full control message to
                                 * userspace before trying to parse
                                 * another message type
                                 */
                                msg->msg_flags |= MSG_EOR;
                                if (control != TLS_RECORD_TYPE_DATA)
                                        goto recv_end;
                        } else {
                                break;
                        }
                } else {
                        /* MSG_PEEK right now cannot look beyond current skb
                         * from strparser, meaning we cannot advance skb here
                         * and thus unpause strparser since we'd lose the
                         * original one.
                         */
                        break;
                }

                /* If we have a new message from strparser, continue now. */
                if (copied >= target && !ctx->recv_pkt)
                        break;
        } while (len);

recv_end:
        if (num_async) {
                /* Wait for all previously submitted records to be decrypted */
                smp_store_mb(ctx->async_notify, true);
                if (atomic_read(&ctx->decrypt_pending)) {
                        err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
                        if (err) {
                                /* one of the async decrypts failed */
                                tls_err_abort(sk, err);
                                copied = 0;
                        }
                } else {
                        reinit_completion(&ctx->async_wait.completion);
                }
                WRITE_ONCE(ctx->async_notify, false);
        }

        release_sock(sk);
        return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
                           struct pipe_inode_info *pipe,
                           size_t len, unsigned int flags)
{
        struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct strp_msg *rxm = NULL;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        ssize_t copied = 0;
        int err = 0;
        long timeo;
        int chunk;
        bool zc = false;

        lock_sock(sk);

        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

        skb = tls_wait_data(sk, flags, timeo, &err);
        if (!skb)
                goto splice_read_end;

        /* splice does not support reading control messages */
        if (ctx->control != TLS_RECORD_TYPE_DATA) {
                err = -ENOTSUPP;
                goto splice_read_end;
        }

        if (!ctx->decrypted) {
                err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);

                if (err < 0) {
                        tls_err_abort(sk, EBADMSG);
                        goto splice_read_end;
                }
                ctx->decrypted = true;
        }
        rxm = strp_msg(skb);

        chunk = min_t(unsigned int, rxm->full_len, len);
        copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
        if (copied < 0)
                goto splice_read_end;

        if (likely(!(flags & MSG_PEEK)))
                tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
        release_sock(sk);
        return copied ? : err;
}

unsigned int tls_sw_poll(struct file *file, struct socket *sock,
                         struct poll_table_struct *wait)
{
        unsigned int ret;
        struct sock *sk = sock->sk;
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

        /* Grab POLLOUT and POLLHUP from the underlying socket */
        ret = ctx->sk_poll(file, sock, wait);

        /* Clear POLLIN bits, and set based on recv_pkt */
        ret &= ~(POLLIN | POLLRDNORM);
        if (ctx->recv_pkt)
                ret |= POLLIN | POLLRDNORM;

        return ret;
}

1507static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
1508{
1509 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001510 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Kees Cook3463e512018-06-25 16:55:05 -07001511 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
Dave Watsonc46234e2018-03-22 10:10:35 -07001512 struct strp_msg *rxm = strp_msg(skb);
1513 size_t cipher_overhead;
1514 size_t data_len = 0;
1515 int ret;
1516
1517 /* Verify that we have a full TLS header, or wait for more data */
1518 if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
1519 return 0;
1520
Kees Cook3463e512018-06-25 16:55:05 -07001521 /* Sanity-check size of on-stack buffer. */
1522 if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
1523 ret = -EINVAL;
1524 goto read_failure;
1525 }
1526
Dave Watsonc46234e2018-03-22 10:10:35 -07001527 /* Linearize header to local buffer */
1528 ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);
1529
1530 if (ret < 0)
1531 goto read_failure;
1532
1533 ctx->control = header[0];
1534
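	/* Record length is bytes 3 and 4 of the TLS header, in network
	 * (big endian) byte order.
	 */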
1535 data_len = ((header[4] & 0xFF) | (header[3] << 8));
1536
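	/* For AES-GCM-128 this is the 8-byte explicit nonce plus the
	 * 16-byte authentication tag.
	 */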
1537 cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;
1538
1539 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
1540 ret = -EMSGSIZE;
1541 goto read_failure;
1542 }
1543 if (data_len < cipher_overhead) {
1544 ret = -EBADMSG;
1545 goto read_failure;
1546 }
1547
Sabrina Dubroca86029d12018-09-12 17:44:42 +02001548 if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
1549 header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001550 ret = -EINVAL;
1551 goto read_failure;
1552 }
1553
Boris Pismenny4799ac82018-07-13 14:33:43 +03001554#ifdef CONFIG_TLS_DEVICE
1555 handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
1556 *(u64 *)tls_ctx->rx.rec_seq);
1557#endif
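	/* Return the full record length so strparser delivers exactly
	 * one TLS record per skb.
	 */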
Dave Watsonc46234e2018-03-22 10:10:35 -07001558 return data_len + TLS_HEADER_SIZE;
1559
1560read_failure:
1561 tls_err_abort(strp->sk, ret);
1562
1563 return ret;
1564}
1565
1566static void tls_queue(struct strparser *strp, struct sk_buff *skb)
1567{
1568 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001569 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001570
1571 ctx->decrypted = false;
1572
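	/* Stash the assembled record for the receive path and pause
	 * parsing until it has been consumed.
	 */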
1573 ctx->recv_pkt = skb;
1574 strp_pause(strp);
1575
Vakul Gargad13acc2018-07-30 16:08:33 +05301576 ctx->saved_data_ready(strp->sk);
Dave Watsonc46234e2018-03-22 10:10:35 -07001577}
1578
1579static void tls_data_ready(struct sock *sk)
1580{
1581 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001582 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001583
1584 strp_data_ready(&ctx->strp);
1585}
1586
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001587void tls_sw_free_resources_tx(struct sock *sk)
Dave Watson3c4d7552017-06-14 11:37:39 -07001588{
1589 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001590 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +05301591 struct tls_rec *rec, *tmp;
1592
1593 /* Wait for any pending async encryptions to complete */
1594 smp_store_mb(ctx->async_notify, true);
1595 if (atomic_read(&ctx->encrypt_pending))
1596 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1597
1598 cancel_delayed_work_sync(&ctx->tx_work.work);
1599
1600 /* Transmit whatever records we can and abandon the rest */
1601 tls_tx_records(sk, -1);
1602
Vakul Garg9932a292018-09-24 15:35:56 +05301603 /* Free up unsent records in tx_list. First, free
Vakul Garga42055e2018-09-21 09:46:13 +05301604 * the partially sent record, if any, at the head of tx_list.
1605 */
1606 if (tls_ctx->partially_sent_record) {
1607 struct scatterlist *sg = tls_ctx->partially_sent_record;
1608
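		/* Drop the page references pinned for the partially sent
		 * record and return the memory to the socket's accounting.
		 */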
1609 while (1) {
1610 put_page(sg_page(sg));
1611 sk_mem_uncharge(sk, sg->length);
1612
1613 if (sg_is_last(sg))
1614 break;
1615 sg++;
1616 }
1617
1618 tls_ctx->partially_sent_record = NULL;
1619
Vakul Garg9932a292018-09-24 15:35:56 +05301620 rec = list_first_entry(&ctx->tx_list,
Vakul Garga42055e2018-09-21 09:46:13 +05301621 struct tls_rec, list);
Vakul Gargb85135b2018-09-25 16:26:17 +05301622
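		/* Entry 0 of the scatterlist is reserved for the AAD, so
		 * the plaintext pages to free start at index 1.
		 */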
Vakul Garg80ece6a2018-09-26 16:22:08 +05301623 free_sg(sk, &rec->sg_plaintext_data[1],
Vakul Gargb85135b2018-09-25 16:26:17 +05301624 &rec->sg_plaintext_num_elem,
1625 &rec->sg_plaintext_size);
1626
Vakul Garga42055e2018-09-21 09:46:13 +05301627 list_del(&rec->list);
1628 kfree(rec);
1629 }
1630
Vakul Garg9932a292018-09-24 15:35:56 +05301631 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
Vakul Garg80ece6a2018-09-26 16:22:08 +05301632 free_sg(sk, &rec->sg_encrypted_data[1],
Vakul Garga42055e2018-09-21 09:46:13 +05301633 &rec->sg_encrypted_num_elem,
1634 &rec->sg_encrypted_size);
1635
Vakul Garg80ece6a2018-09-26 16:22:08 +05301636 free_sg(sk, &rec->sg_plaintext_data[1],
Vakul Gargb85135b2018-09-25 16:26:17 +05301637 &rec->sg_plaintext_num_elem,
1638 &rec->sg_plaintext_size);
1639
Vakul Garga42055e2018-09-21 09:46:13 +05301640 list_del(&rec->list);
1641 kfree(rec);
1642 }
Dave Watson3c4d7552017-06-14 11:37:39 -07001643
Vakul Garg201876b2018-07-24 16:54:27 +05301644 crypto_free_aead(ctx->aead_send);
Vakul Gargc7749732018-09-25 20:21:51 +05301645 tls_free_open_rec(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001646
1647 kfree(ctx);
1648}
1649
Boris Pismenny39f56e12018-07-13 14:33:41 +03001650void tls_sw_release_resources_rx(struct sock *sk)
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001651{
1652 struct tls_context *tls_ctx = tls_get_ctx(sk);
1653 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1654
Dave Watsonc46234e2018-03-22 10:10:35 -07001655 if (ctx->aead_recv) {
Vakul Garg201876b2018-07-24 16:54:27 +05301656 kfree_skb(ctx->recv_pkt);
1657 ctx->recv_pkt = NULL;
Dave Watsonc46234e2018-03-22 10:10:35 -07001658 crypto_free_aead(ctx->aead_recv);
1659 strp_stop(&ctx->strp);
1660 write_lock_bh(&sk->sk_callback_lock);
1661 sk->sk_data_ready = ctx->saved_data_ready;
1662 write_unlock_bh(&sk->sk_callback_lock);
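		/* strp_done() flushes strparser work that may itself need
		 * the socket lock, so drop the lock across the call.
		 */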
1663 release_sock(sk);
1664 strp_done(&ctx->strp);
1665 lock_sock(sk);
1666 }
Boris Pismenny39f56e12018-07-13 14:33:41 +03001667}
1668
1669void tls_sw_free_resources_rx(struct sock *sk)
1670{
1671 struct tls_context *tls_ctx = tls_get_ctx(sk);
1672 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1673
1674 tls_sw_release_resources_rx(sk);
Dave Watson3c4d7552017-06-14 11:37:39 -07001675
Dave Watson3c4d7552017-06-14 11:37:39 -07001676 kfree(ctx);
1677}
1678
Vakul Garg9932a292018-09-24 15:35:56 +05301679/* The work handler to transmit the encrypted records in tx_list */
Vakul Garga42055e2018-09-21 09:46:13 +05301680static void tx_work_handler(struct work_struct *work)
1681{
1682 struct delayed_work *delayed_work = to_delayed_work(work);
1683 struct tx_work *tx_work = container_of(delayed_work,
1684 struct tx_work, work);
1685 struct sock *sk = tx_work->sk;
1686 struct tls_context *tls_ctx = tls_get_ctx(sk);
1687 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1688
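	/* Only run if a transmission was actually scheduled; clearing
	 * the bit lets the next sender schedule the work again.
	 */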
1689 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
1690 return;
1691
1692 lock_sock(sk);
1693 tls_tx_records(sk, -1);
1694 release_sock(sk);
1695}
1696
Dave Watsonc46234e2018-03-22 10:10:35 -07001697int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
Dave Watson3c4d7552017-06-14 11:37:39 -07001698{
Dave Watson3c4d7552017-06-14 11:37:39 -07001699 struct tls_crypto_info *crypto_info;
1700 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001701 struct tls_sw_context_tx *sw_ctx_tx = NULL;
1702 struct tls_sw_context_rx *sw_ctx_rx = NULL;
Dave Watsonc46234e2018-03-22 10:10:35 -07001703 struct cipher_context *cctx;
1704 struct crypto_aead **aead;
1705 struct strp_callbacks cb;
Dave Watson3c4d7552017-06-14 11:37:39 -07001706 u16 nonce_size, tag_size, iv_size, rec_seq_size;
1707 char *iv, *rec_seq;
1708 int rc = 0;
1709
1710 if (!ctx) {
1711 rc = -EINVAL;
1712 goto out;
1713 }
1714
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001715 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03001716 if (!ctx->priv_ctx_tx) {
1717 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
1718 if (!sw_ctx_tx) {
1719 rc = -ENOMEM;
1720 goto out;
1721 }
1722 ctx->priv_ctx_tx = sw_ctx_tx;
1723 } else {
1724 sw_ctx_tx =
1725 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
Dave Watsonc46234e2018-03-22 10:10:35 -07001726 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001727 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03001728 if (!ctx->priv_ctx_rx) {
1729 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
1730 if (!sw_ctx_rx) {
1731 rc = -ENOMEM;
1732 goto out;
1733 }
1734 ctx->priv_ctx_rx = sw_ctx_rx;
1735 } else {
1736 sw_ctx_rx =
1737 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001738 }
Dave Watson3c4d7552017-06-14 11:37:39 -07001739 }
1740
Dave Watsonc46234e2018-03-22 10:10:35 -07001741 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03001742 crypto_init_wait(&sw_ctx_tx->async_wait);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02001743 crypto_info = &ctx->crypto_send.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07001744 cctx = &ctx->tx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001745 aead = &sw_ctx_tx->aead_send;
Vakul Garg9932a292018-09-24 15:35:56 +05301746 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
Vakul Garga42055e2018-09-21 09:46:13 +05301747 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
1748 sw_ctx_tx->tx_work.sk = sk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001749 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03001750 crypto_init_wait(&sw_ctx_rx->async_wait);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02001751 crypto_info = &ctx->crypto_recv.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07001752 cctx = &ctx->rx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001753 aead = &sw_ctx_rx->aead_recv;
Dave Watsonc46234e2018-03-22 10:10:35 -07001754 }
1755
Dave Watson3c4d7552017-06-14 11:37:39 -07001756 switch (crypto_info->cipher_type) {
1757 case TLS_CIPHER_AES_GCM_128: {
1758 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
1759 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
1760 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
1761 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
1762 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
1763 rec_seq =
1764 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
1765 gcm_128_info =
1766 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
1767 break;
1768 }
1769 default:
1770 rc = -EINVAL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01001771 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07001772 }
1773
Kees Cookb16520f2018-04-10 17:52:34 -07001774 /* Sanity-check the IV size for stack allocations. */
Kees Cook3463e512018-06-25 16:55:05 -07001775 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
Kees Cookb16520f2018-04-10 17:52:34 -07001776 rc = -EINVAL;
1777 goto free_priv;
1778 }
1779
Dave Watsonc46234e2018-03-22 10:10:35 -07001780 cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
1781 cctx->tag_size = tag_size;
1782 cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
1783 cctx->iv_size = iv_size;
1784 cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
1785 GFP_KERNEL);
1786 if (!cctx->iv) {
Dave Watson3c4d7552017-06-14 11:37:39 -07001787 rc = -ENOMEM;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01001788 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07001789 }
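	/* The AEAD IV buffer holds the 4-byte implicit salt followed by
	 * the 8-byte explicit per-record IV supplied by user space.
	 */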
Dave Watsonc46234e2018-03-22 10:10:35 -07001790 memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
1791 memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
1792 cctx->rec_seq_size = rec_seq_size;
zhong jiang969d5092018-08-01 00:50:24 +08001793 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
Dave Watsonc46234e2018-03-22 10:10:35 -07001794 if (!cctx->rec_seq) {
Dave Watson3c4d7552017-06-14 11:37:39 -07001795 rc = -ENOMEM;
1796 goto free_iv;
1797 }
Dave Watson3c4d7552017-06-14 11:37:39 -07001798
Dave Watsonc46234e2018-03-22 10:10:35 -07001799 if (!*aead) {
1800 *aead = crypto_alloc_aead("gcm(aes)", 0, 0);
1801 if (IS_ERR(*aead)) {
1802 rc = PTR_ERR(*aead);
1803 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07001804 goto free_rec_seq;
1805 }
1806 }
1807
1808 ctx->push_pending_record = tls_sw_push_pending_record;
1809
Sabrina Dubroca7cba09c2018-09-12 17:44:41 +02001810 rc = crypto_aead_setkey(*aead, gcm_128_info->key,
Dave Watson3c4d7552017-06-14 11:37:39 -07001811 TLS_CIPHER_AES_GCM_128_KEY_SIZE);
1812 if (rc)
1813 goto free_aead;
1814
Dave Watsonc46234e2018-03-22 10:10:35 -07001815 rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
1816 if (rc)
1817 goto free_aead;
1818
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001819 if (sw_ctx_rx) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001820 /* Set up strparser */
1821 memset(&cb, 0, sizeof(cb));
1822 cb.rcv_msg = tls_queue;
1823 cb.parse_msg = tls_read_size;
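		/* tls_read_size() reports each record's total length;
		 * tls_queue() stashes the assembled record and pauses the
		 * parser until it is consumed.
		 */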
1824
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001825 strp_init(&sw_ctx_rx->strp, sk, &cb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001826
1827 write_lock_bh(&sk->sk_callback_lock);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001828 sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
Dave Watsonc46234e2018-03-22 10:10:35 -07001829 sk->sk_data_ready = tls_data_ready;
1830 write_unlock_bh(&sk->sk_callback_lock);
1831
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07001832 sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;
Dave Watsonc46234e2018-03-22 10:10:35 -07001833
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001834 strp_check_rcv(&sw_ctx_rx->strp);
Dave Watsonc46234e2018-03-22 10:10:35 -07001835 }
1836
1837 goto out;
Dave Watson3c4d7552017-06-14 11:37:39 -07001838
1839free_aead:
Dave Watsonc46234e2018-03-22 10:10:35 -07001840 crypto_free_aead(*aead);
1841 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07001842free_rec_seq:
Dave Watsonc46234e2018-03-22 10:10:35 -07001843 kfree(cctx->rec_seq);
1844 cctx->rec_seq = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07001845free_iv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001846 kfree(cctx->iv);
1847 cctx->iv = NULL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01001848free_priv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001849 if (tx) {
1850 kfree(ctx->priv_ctx_tx);
1851 ctx->priv_ctx_tx = NULL;
1852 } else {
1853 kfree(ctx->priv_ctx_rx);
1854 ctx->priv_ctx_rx = NULL;
1855 }
Dave Watson3c4d7552017-06-14 11:37:39 -07001856out:
1857 return rc;
1858}