/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE

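/* Count how many scatterlist entries skb_to_sgvec() would need to map
 * bytes [offset, offset + len) of this skb: the linear head, each page
 * frag, and (recursively) any frag list. Recursion depth is capped to
 * bound kernel stack usage on deeply nested frag lists.
 */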
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}
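
/* Typical use, as in decrypt_internal() below: size the ciphertext
 * scatterlist before allocating it, e.g.
 *
 *	n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size,
 *			 rxm->full_len - tls_ctx->rx.prepend_size);
 */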

static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct scatterlist *sgout = aead_req->dst;
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	pending = atomic_dec_return(&ctx->decrypt_pending);

	/* Propagate if there was an err */
	if (err) {
		ctx->async_wait.err = err;
		tls_err_abort(skb->sk, err);
	}

	/* After using skb->sk to propagate sk through crypto async callback
	 * we need to NULL it again.
	 */
	skb->sk = NULL;

	/* Release the skb, pages and memory allocated for crypto req */
	kfree_skb(skb);

	/* Skip the first S/G entry as it points to AAD */
	for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
		if (!sg)
			break;
		put_page(sg_page(sg));
	}

	kfree(aead_req);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);
}
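
/* Submit one AEAD decrypt request. In async mode the request completes
 * in tls_decrypt_done() and -EINPROGRESS is returned to the caller; in
 * sync mode an -EINPROGRESS return from the cipher is converted into a
 * wait on ctx->async_wait until the operation finishes.
 */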
static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + tls_ctx->rx.tag_size,
			       (u8 *)iv_recv);

	if (async) {
		/* Using skb->sk to push sk through to crypto async callback
		 * handler. This allows propagating errors up to the socket
		 * if needed. It _must_ be cleared in the async handler
		 * before kfree_skb is called. We _know_ skb->sk is NULL
		 * because it is a clone from strparser.
		 */
		skb->sk = sk;
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, skb);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (async)
			return ret;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}

	if (async)
		atomic_dec(&ctx->decrypt_pending);

	return ret;
}

static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has enough required room (ensured by caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = tls_ctx->tx.prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	/* Return if there is no open record */
	if (!rec)
		return;

	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}
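
/* Transmit encrypted records in order: first finish any partially sent
 * record at the head of tx_list, then push every record already marked
 * tx_ready, stopping at the first record whose encryption has not yet
 * completed so that TLS record order is preserved on the wire.
 */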
275
Vakul Garga42055e2018-09-21 09:46:13 +0530276int tls_tx_records(struct sock *sk, int flags)
277{
278 struct tls_context *tls_ctx = tls_get_ctx(sk);
279 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
280 struct tls_rec *rec, *tmp;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200281 struct sk_msg *msg_en;
Vakul Garga42055e2018-09-21 09:46:13 +0530282 int tx_flags, rc = 0;
283
284 if (tls_is_partially_sent_record(tls_ctx)) {
Vakul Garg9932a292018-09-24 15:35:56 +0530285 rec = list_first_entry(&ctx->tx_list,
Vakul Garga42055e2018-09-21 09:46:13 +0530286 struct tls_rec, list);
287
288 if (flags == -1)
289 tx_flags = rec->tx_flags;
290 else
291 tx_flags = flags;
292
293 rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
294 if (rc)
295 goto tx_err;
296
297 /* Full record has been transmitted.
Vakul Garg9932a292018-09-24 15:35:56 +0530298 * Remove the head of tx_list
Vakul Garga42055e2018-09-21 09:46:13 +0530299 */
Vakul Garga42055e2018-09-21 09:46:13 +0530300 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200301 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +0530302 kfree(rec);
303 }
304
Vakul Garg9932a292018-09-24 15:35:56 +0530305 /* Tx all ready records */
306 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
307 if (READ_ONCE(rec->tx_ready)) {
Vakul Garga42055e2018-09-21 09:46:13 +0530308 if (flags == -1)
309 tx_flags = rec->tx_flags;
310 else
311 tx_flags = flags;
312
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200313 msg_en = &rec->msg_encrypted;
Vakul Garga42055e2018-09-21 09:46:13 +0530314 rc = tls_push_sg(sk, tls_ctx,
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200315 &msg_en->sg.data[msg_en->sg.curr],
Vakul Garga42055e2018-09-21 09:46:13 +0530316 0, tx_flags);
317 if (rc)
318 goto tx_err;
319
Vakul Garga42055e2018-09-21 09:46:13 +0530320 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200321 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +0530322 kfree(rec);
323 } else {
324 break;
325 }
326 }
327
328tx_err:
329 if (rc < 0 && rc != -EAGAIN)
330 tls_err_abort(sk, EBADMSG);
331
332 return rc;
333}
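
/* AEAD encrypt completion callback. Restores the scatterlist entry that
 * tls_do_encryption() shifted past the TLS header, marks the record as
 * ready for transmission and, if it sits at the head of tx_list,
 * schedules tx_work to actually push it out.
 */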
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct sock *sk = req->data;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);
	msg_en = &rec->msg_encrypted;

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= tls_ctx->tx.prepend_size;
	sge->length += tls_ctx->tx.prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}

static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc;

	sge->offset += tls_ctx->tx.prepend_size;
	sge->length -= tls_ctx->tx.prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, tls_ctx->tx.iv);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, sk);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= tls_ctx->tx.prepend_size;
		sge->length += tls_ctx->tx.prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, &tls_ctx->tx);
	return rc;
}
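
/* Close the currently open record: chain the AAD entry in front of the
 * plaintext and ciphertext scatterlists, write the AAD and the TLS
 * header, hand the record to the cipher, then try to transmit whatever
 * records are ready.
 */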
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	int rc;
	u32 i;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_pl, i));

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
		 &msg_en->sg.data[i] : &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset, msg_pl->sg.size,
			 record_type);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req, msg_pl->sg.size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS)
			tls_err_abort(sk, EBADMSG);
		return rc;
	}

	return tls_tx_records(sk, flags);
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

static struct tls_rec *get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	/* Return if we already have an open record */
	if (ctx->open_rec)
		return ctx->open_rec;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
		   sizeof(rec->aad_space));
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space,
		   sizeof(rec->aad_space));
	sg_unmark_end(&rec->sg_aead_out[1]);

	ctx->open_rec = rec;
	rec->inplace_crypto = 1;

	return rec;
}
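
/* sendmsg() for a TLS_TX socket. Plaintext handed in here is framed
 * into records of at most TLS_MAX_PAYLOAD_SIZE, encrypted (zero-copy
 * from user pages when the record is complete and the cipher is
 * synchronous) and sent via tls_tx_records(). A minimal userspace
 * sketch of the path into this function, assuming an established TCP
 * connection and a filled-in crypto_info:
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &crypto_info, sizeof(crypto_info));
 *	send(fd, buf, len, 0);
 */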
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
	bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = msg->msg_iter.type & ITER_KVEC;
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	/* Wait for any pending writes on the socket to complete */
	if (unlikely(sk->sk_write_pending)) {
		ret = wait_on_pending_writer(sk, &timeo);
		if (unlikely(ret))
			goto send_end;
	}

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		rec = get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			rec->inplace_crypto = 0;

			num_zc++;
			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;

fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en, msg_pl->sg.size +
				    tls_ctx->tx.overhead_size);
		}

		ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_pl,
					       try_to_copy);
		if (ret < 0)
			goto trim_sgl;

		/* Open records are defined only if data was successfully
		 * copied, otherwise we would trim the sg but not reset
		 * the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret != -EAGAIN)
					goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		smp_store_mb(ctx->async_notify, true);

		if (atomic_read(&ctx->encrypt_pending))
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}
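
/* sendpage() counterpart of tls_sw_sendmsg(): the data already lives in
 * a page, so it is appended to the plaintext sk_msg by reference rather
 * than copied. MSG_SENDPAGE_NOTLAST is treated like MSG_MORE.
 */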
737
738int tls_sw_sendpage(struct sock *sk, struct page *page,
739 int offset, size_t size, int flags)
740{
Vakul Garga42055e2018-09-21 09:46:13 +0530741 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
Dave Watson3c4d7552017-06-14 11:37:39 -0700742 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +0300743 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Dave Watson3c4d7552017-06-14 11:37:39 -0700744 unsigned char record_type = TLS_RECORD_TYPE_DATA;
Vakul Garga42055e2018-09-21 09:46:13 +0530745 size_t orig_size = size;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200746 struct sk_msg *msg_pl;
Vakul Garga42055e2018-09-21 09:46:13 +0530747 struct tls_rec *rec;
748 int num_async = 0;
Dave Watson3c4d7552017-06-14 11:37:39 -0700749 bool full_record;
750 int record_room;
Vakul Garg4128c0c2018-09-24 16:09:49 +0530751 int ret = 0;
Vakul Garga42055e2018-09-21 09:46:13 +0530752 bool eor;
Dave Watson3c4d7552017-06-14 11:37:39 -0700753
754 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
755 MSG_SENDPAGE_NOTLAST))
756 return -ENOTSUPP;
757
758 /* No MSG_EOR from splice, only look at MSG_MORE */
759 eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
760
761 lock_sock(sk);
762
763 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
764
Vakul Garga42055e2018-09-21 09:46:13 +0530765 /* Wait till there is any pending write on socket */
766 if (unlikely(sk->sk_write_pending)) {
767 ret = wait_on_pending_writer(sk, &timeo);
768 if (unlikely(ret))
769 goto sendpage_end;
770 }
Dave Watson3c4d7552017-06-14 11:37:39 -0700771
772 /* Call the sk_stream functions to manage the sndbuf mem. */
773 while (size > 0) {
774 size_t copy, required_size;
775
776 if (sk->sk_err) {
r.hering@avm.de30be8f82018-01-12 15:42:06 +0100777 ret = -sk->sk_err;
Dave Watson3c4d7552017-06-14 11:37:39 -0700778 goto sendpage_end;
779 }
780
Vakul Garga42055e2018-09-21 09:46:13 +0530781 rec = get_rec(sk);
782 if (!rec) {
783 ret = -ENOMEM;
784 goto sendpage_end;
785 }
786
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200787 msg_pl = &rec->msg_plaintext;
788
Dave Watson3c4d7552017-06-14 11:37:39 -0700789 full_record = false;
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200790 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700791 copy = size;
792 if (copy >= record_room) {
793 copy = record_room;
794 full_record = true;
795 }
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200796
797 required_size = msg_pl->sg.size + copy +
798 tls_ctx->tx.overhead_size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700799
800 if (!sk_stream_memory_free(sk))
801 goto wait_for_sndbuf;
802alloc_payload:
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200803 ret = tls_alloc_encrypted_msg(sk, required_size);
Dave Watson3c4d7552017-06-14 11:37:39 -0700804 if (ret) {
805 if (ret != -ENOSPC)
806 goto wait_for_memory;
807
808 /* Adjust copy according to the amount that was
809 * actually allocated. The difference is due
810 * to max sg elements limit
811 */
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200812 copy -= required_size - msg_pl->sg.size;
Dave Watson3c4d7552017-06-14 11:37:39 -0700813 full_record = true;
814 }
815
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200816 sk_msg_page_add(msg_pl, page, copy, offset);
Dave Watson3c4d7552017-06-14 11:37:39 -0700817 sk_mem_charge(sk, copy);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200818
Dave Watson3c4d7552017-06-14 11:37:39 -0700819 offset += copy;
820 size -= copy;
Dave Watson3c4d7552017-06-14 11:37:39 -0700821
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200822 tls_ctx->pending_open_record_frags = true;
823 if (full_record || eor || sk_msg_full(msg_pl)) {
Vakul Garg4e6d4722018-09-30 08:04:35 +0530824 rec->inplace_crypto = 0;
Dave Watson3c4d7552017-06-14 11:37:39 -0700825 ret = tls_push_record(sk, flags, record_type);
826 if (ret) {
Vakul Garga42055e2018-09-21 09:46:13 +0530827 if (ret == -EINPROGRESS)
828 num_async++;
829 else if (ret != -EAGAIN)
830 goto sendpage_end;
Dave Watson3c4d7552017-06-14 11:37:39 -0700831 }
832 }
833 continue;
834wait_for_sndbuf:
835 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
836wait_for_memory:
837 ret = sk_stream_wait_memory(sk, &timeo);
838 if (ret) {
Daniel Borkmannd829e9c2018-10-13 02:45:59 +0200839 tls_trim_both_msgs(sk, msg_pl->sg.size);
Dave Watson3c4d7552017-06-14 11:37:39 -0700840 goto sendpage_end;
841 }
842
Dave Watson3c4d7552017-06-14 11:37:39 -0700843 goto alloc_payload;
844 }
845
Vakul Garga42055e2018-09-21 09:46:13 +0530846 if (num_async) {
847 /* Transmit if any encryptions have completed */
848 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
849 cancel_delayed_work(&ctx->tx_work.work);
850 tls_tx_records(sk, flags);
851 }
852 }
Dave Watson3c4d7552017-06-14 11:37:39 -0700853sendpage_end:
854 if (orig_size > size)
855 ret = orig_size - size;
856 else
857 ret = sk_stream_error(sk, flags, ret);
858
859 release_sock(sk);
860 return ret;
861}

static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}
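
/* Pin the user pages backing 'from' and map them into the 'to'
 * scatterlist, so the cipher can write decrypted payload straight into
 * the receive buffer (the zero-copy receive path).
 */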
static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
			       int length, int *pages_used,
			       unsigned int *size_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = *size_used;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

/* This function decrypts the input skb into either out_iov or out_sg
 * or into the skb's own buffers. The input parameter 'zc' indicates if
 * zero-copy mode needs to be tried or not. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
 * NULL, then the decryption happens inside skb buffers itself, i.e.
 * zero-copy gets disabled and 'zc' is updated.
 */
static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *out_iov,
			    struct scatterlist *out_sg,
			    int *chunk, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
	struct aead_request *aead_req;
	struct sk_buff *unused;
	u8 *aad, *iv, *mem = NULL;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;

	if (*zc && (out_iov || out_sg)) {
		if (out_iov)
			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
		else
			n_sgout = sg_nents(out_sg);
		n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size,
				 rxm->full_len - tls_ctx->rx.prepend_size);
	} else {
		n_sgout = 0;
		*zc = false;
		n_sgin = skb_cow_data(skb, 0, &unused);
	}

	if (n_sgin < 1)
		return -EBADMSG;

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	nsg = n_sgin + n_sgout;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
	mem_size = mem_size + TLS_AAD_SPACE_SIZE;
	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

	/* Allocate a single block of memory which contains
	 * aead_req || sgin[] || sgout[] || aad || iv.
	 * This order achieves correct alignment for aead_req, sgin, sgout.
	 */
	mem = kmalloc(mem_size, sk->sk_allocation);
	if (!mem)
		return -ENOMEM;

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	sgin = (struct scatterlist *)(mem + aead_size);
	sgout = sgin + n_sgin;
	aad = (u8 *)(sgout + n_sgout);
	iv = aad + TLS_AAD_SPACE_SIZE;

	/* Prepare IV */
	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			    tls_ctx->rx.iv_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}
	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);

	/* Prepare AAD */
	tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
		     tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
		     ctx->control);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + tls_ctx->rx.prepend_size,
			   rxm->full_len - tls_ctx->rx.prepend_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}

	if (n_sgout) {
		if (out_iov) {
			sg_init_table(sgout, n_sgout);
			sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);

			*chunk = 0;
			err = tls_setup_from_iter(sk, out_iov, data_len,
						  &pages, chunk, &sgout[1],
						  (n_sgout - 1));
			if (err < 0)
				goto fallback_to_reg_recv;
		} else if (out_sg) {
			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
		} else {
			goto fallback_to_reg_recv;
		}
	} else {
fallback_to_reg_recv:
		sgout = sgin;
		pages = 0;
		*chunk = 0;
		*zc = false;
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
				data_len, aead_req, *zc);
	if (err == -EINPROGRESS)
		return err;

	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));

	kfree(mem);
	return err;
}

static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct iov_iter *dest, int *chunk, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0;

#ifdef CONFIG_TLS_DEVICE
	err = tls_device_decrypted(sk, skb);
	if (err < 0)
		return err;
#endif
	if (!ctx->decrypted) {
		err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
		if (err < 0) {
			if (err == -EINPROGRESS)
				tls_advance_record_sn(sk, &tls_ctx->rx);

			return err;
		}
	} else {
		*zc = false;
	}

	rxm->offset += tls_ctx->rx.prepend_size;
	rxm->full_len -= tls_ctx->rx.overhead_size;
	tls_advance_record_sn(sk, &tls_ctx->rx);
	ctx->decrypted = true;
	ctx->saved_data_ready(sk);

	return err;
}
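
/* Decrypt a record into a caller-supplied scatterlist (used, e.g., by
 * the device-offload re-encrypt fallback); decryption is forced through
 * the out_sg branch of decrypt_internal().
 */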
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	bool zc = true;
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (skb) {
		struct strp_msg *rxm = strp_msg(skb);

		if (len < rxm->full_len) {
			rxm->offset += len;
			rxm->full_len -= len;
			return false;
		}
		kfree_skb(skb);
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	__strp_unpause(&ctx->strp);

	return true;
}
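
/* recvmsg() for a TLS_RX socket: wait for a record from strparser,
 * decrypt it (directly into the user iovec when zero-copy is possible),
 * copy out the payload and advance to the next record. Control-message
 * records are surfaced to userspace via a TLS_GET_RECORD_TYPE cmsg.
 */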
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	unsigned char control;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;
	bool is_kvec = msg->msg_iter.type & ITER_KVEC;
	int num_async = 0;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		bool zc = false;
		bool async = false;
		int chunk = 0;

		skb = tls_wait_data(sk, flags, timeo, &err);
		if (!skb)
			goto recv_end;

		rxm = strp_msg(skb);

		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(ctx->control), &ctx->control);
			cmsg = true;
			control = ctx->control;
			if (ctx->control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		} else if (control != ctx->control) {
			goto recv_end;
		}

		if (!ctx->decrypted) {
			int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;

			if (!is_kvec && to_copy <= len &&
			    likely(!(flags & MSG_PEEK)))
				zc = true;

			err = decrypt_skb_update(sk, skb, &msg->msg_iter,
						 &chunk, &zc);
			if (err < 0 && err != -EINPROGRESS) {
				tls_err_abort(sk, EBADMSG);
				goto recv_end;
			}

			if (err == -EINPROGRESS) {
				async = true;
				num_async++;
				goto pick_next_record;
			}

			ctx->decrypted = true;
		}

		if (!zc) {
			chunk = min_t(unsigned int, rxm->full_len, len);

			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
						    chunk);
			if (err < 0)
				goto recv_end;
		}

pick_next_record:
		copied += chunk;
		len -= chunk;
		if (likely(!(flags & MSG_PEEK))) {
			u8 control = ctx->control;

			/* For async, drop current skb reference */
			if (async)
				skb = NULL;

			if (tls_sw_advance_skb(sk, skb, chunk)) {
				/* Return full control message to
				 * userspace before trying to parse
				 * another message type
				 */
				msg->msg_flags |= MSG_EOR;
				if (control != TLS_RECORD_TYPE_DATA)
					goto recv_end;
			} else {
				break;
			}
		} else {
			/* MSG_PEEK right now cannot look beyond current skb
			 * from strparser, meaning we cannot advance skb here
			 * and thus unpause strparser since we'd lose the
			 * original one.
			 */
			break;
		}

		/* If we have a new message from strparser, continue now. */
		if (copied >= target && !ctx->recv_pkt)
			break;
	} while (len);

recv_end:
	if (num_async) {
		/* Wait for all previously submitted records to be decrypted */
		smp_store_mb(ctx->async_notify, true);
		if (atomic_read(&ctx->decrypt_pending)) {
			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
			if (err) {
				/* one of async decrypt failed */
				tls_err_abort(sk, err);
				copied = 0;
			}
		} else {
			reinit_completion(&ctx->async_wait.completion);
		}
		WRITE_ONCE(ctx->async_notify, false);
	}

	release_sock(sk);
	return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc = false;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	/* splice does not support reading control messages */
	if (ctx->control != TLS_RECORD_TYPE_DATA) {
		err = -ENOTSUPP;
		goto splice_read_end;
	}

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

unsigned int tls_sw_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	unsigned int ret;
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	/* Grab POLLOUT and POLLHUP from the underlying socket */
	ret = ctx->sk_poll(file, sock, wait);

	/* Clear POLLIN bits, and set based on recv_pkt */
	ret &= ~(POLLIN | POLLRDNORM);
	if (ctx->recv_pkt)
		ret |= POLLIN | POLLRDNORM;

	return ret;
}

static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);

	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
		ret = -EINVAL;
		goto read_failure;
	}

#ifdef CONFIG_TLS_DEVICE
	handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
			     *(u64 *)tls_ctx->rx.rec_seq);
#endif
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}
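
/* strparser callback invoked with one complete, parsed TLS record. The
 * record is stashed in ctx->recv_pkt and the parser is paused until a
 * reader consumes it via tls_sw_advance_skb().
 */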
static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	ctx->saved_data_ready(strp->sk);
}
1445
1446static void tls_data_ready(struct sock *sk)
1447{
1448 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001449 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
Dave Watsonc46234e2018-03-22 10:10:35 -07001450
1451 strp_data_ready(&ctx->strp);
1452}
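/*
 * Installed as sk->sk_data_ready in tls_set_sw_offload() below: every
 * data-ready event on the TCP socket is funneled into strparser, which
 * slices the byte stream into records via tls_read_size()/tls_queue().
 */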
1453
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001454void tls_sw_free_resources_tx(struct sock *sk)
Dave Watson3c4d7552017-06-14 11:37:39 -07001455{
1456 struct tls_context *tls_ctx = tls_get_ctx(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001457 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
Vakul Garga42055e2018-09-21 09:46:13 +05301458 struct tls_rec *rec, *tmp;
1459
 1460	/* Wait for any pending async encryptions to complete.
	 * smp_store_mb() publishes async_notify with a full barrier, so a
	 * completion callback that sees encrypt_pending drop to zero also
	 * sees the flag and wakes async_wait.
	 */
1461 smp_store_mb(ctx->async_notify, true);
1462 if (atomic_read(&ctx->encrypt_pending))
1463 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1464
1465 cancel_delayed_work_sync(&ctx->tx_work.work);
1466
 1467	/* Transmit whatever records we can and abandon the rest */
1468 tls_tx_records(sk, -1);
1469
Vakul Garg9932a292018-09-24 15:35:56 +05301470	/* Free up unsent records in tx_list. First, free the partially
Vakul Garga42055e2018-09-21 09:46:13 +05301471	 * sent record, if any, at the head of tx_list.
 1472	 */
1473 if (tls_ctx->partially_sent_record) {
1474 struct scatterlist *sg = tls_ctx->partially_sent_record;
1475
1476 while (1) {
1477 put_page(sg_page(sg));
1478 sk_mem_uncharge(sk, sg->length);
1479
1480 if (sg_is_last(sg))
1481 break;
1482 sg++;
1483 }
1484
1485 tls_ctx->partially_sent_record = NULL;
1486
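		/*
		 * The sg walk above already released the encrypted pages of
		 * the head record (partially_sent_record pointed into its
		 * msg_encrypted buffer), so only the plaintext copy and the
		 * record itself remain to be freed.
		 */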
Vakul Garg9932a292018-09-24 15:35:56 +05301487 rec = list_first_entry(&ctx->tx_list,
Vakul Garga42055e2018-09-21 09:46:13 +05301488 struct tls_rec, list);
1489 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001490 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +05301491 kfree(rec);
1492 }
1493
Vakul Garg9932a292018-09-24 15:35:56 +05301494 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
Vakul Garga42055e2018-09-21 09:46:13 +05301495 list_del(&rec->list);
Daniel Borkmannd829e9c2018-10-13 02:45:59 +02001496 sk_msg_free(sk, &rec->msg_encrypted);
1497 sk_msg_free(sk, &rec->msg_plaintext);
Vakul Garga42055e2018-09-21 09:46:13 +05301498 kfree(rec);
1499 }
Dave Watson3c4d7552017-06-14 11:37:39 -07001500
Vakul Garg201876b2018-07-24 16:54:27 +05301501 crypto_free_aead(ctx->aead_send);
Vakul Gargc7749732018-09-25 20:21:51 +05301502 tls_free_open_rec(sk);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001503
1504 kfree(ctx);
1505}
1506
Boris Pismenny39f56e12018-07-13 14:33:41 +03001507void tls_sw_release_resources_rx(struct sock *sk)
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001508{
1509 struct tls_context *tls_ctx = tls_get_ctx(sk);
1510 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1511
Dave Watsonc46234e2018-03-22 10:10:35 -07001512 if (ctx->aead_recv) {
Vakul Garg201876b2018-07-24 16:54:27 +05301513 kfree_skb(ctx->recv_pkt);
1514 ctx->recv_pkt = NULL;
Dave Watsonc46234e2018-03-22 10:10:35 -07001515 crypto_free_aead(ctx->aead_recv);
1516 strp_stop(&ctx->strp);
1517 write_lock_bh(&sk->sk_callback_lock);
1518 sk->sk_data_ready = ctx->saved_data_ready;
1519 write_unlock_bh(&sk->sk_callback_lock);
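		/*
		 * strp_done() flushes strparser work that may itself take
		 * the socket lock, so drop the lock around it to avoid
		 * deadlocking against that work.
		 */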
1520 release_sock(sk);
1521 strp_done(&ctx->strp);
1522 lock_sock(sk);
1523 }
Boris Pismenny39f56e12018-07-13 14:33:41 +03001524}
1525
1526void tls_sw_free_resources_rx(struct sock *sk)
1527{
1528 struct tls_context *tls_ctx = tls_get_ctx(sk);
1529 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1530
1531 tls_sw_release_resources_rx(sk);
Dave Watson3c4d7552017-06-14 11:37:39 -07001532
Dave Watson3c4d7552017-06-14 11:37:39 -07001533 kfree(ctx);
1534}
1535
Vakul Garg9932a292018-09-24 15:35:56 +05301536/* The work handler to transmit the encrypted records in tx_list */
Vakul Garga42055e2018-09-21 09:46:13 +05301537static void tx_work_handler(struct work_struct *work)
1538{
1539 struct delayed_work *delayed_work = to_delayed_work(work);
1540 struct tx_work *tx_work = container_of(delayed_work,
1541 struct tx_work, work);
1542 struct sock *sk = tx_work->sk;
1543 struct tls_context *tls_ctx = tls_get_ctx(sk);
1544 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1545
1546 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
1547 return;
1548
1549 lock_sock(sk);
1550 tls_tx_records(sk, -1);
1551 release_sock(sk);
1552}
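/*
 * BIT_TX_SCHEDULED pairs scheduling with execution: the transmit path
 * queues this work only after winning test_and_set_bit(), and the
 * handler proceeds only if it can clear the bit again, so a schedule
 * that was already handled or cancelled becomes a no-op.
 */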
1553
Dave Watsonc46234e2018-03-22 10:10:35 -07001554int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
Dave Watson3c4d7552017-06-14 11:37:39 -07001555{
Dave Watson3c4d7552017-06-14 11:37:39 -07001556 struct tls_crypto_info *crypto_info;
1557 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001558 struct tls_sw_context_tx *sw_ctx_tx = NULL;
1559 struct tls_sw_context_rx *sw_ctx_rx = NULL;
Dave Watsonc46234e2018-03-22 10:10:35 -07001560 struct cipher_context *cctx;
1561 struct crypto_aead **aead;
1562 struct strp_callbacks cb;
Dave Watson3c4d7552017-06-14 11:37:39 -07001563 u16 nonce_size, tag_size, iv_size, rec_seq_size;
1564 char *iv, *rec_seq;
1565 int rc = 0;
1566
1567 if (!ctx) {
1568 rc = -EINVAL;
1569 goto out;
1570 }
1571
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001572 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03001573 if (!ctx->priv_ctx_tx) {
1574 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
1575 if (!sw_ctx_tx) {
1576 rc = -ENOMEM;
1577 goto out;
1578 }
1579 ctx->priv_ctx_tx = sw_ctx_tx;
1580 } else {
1581 sw_ctx_tx =
1582 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
Dave Watsonc46234e2018-03-22 10:10:35 -07001583 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001584 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03001585 if (!ctx->priv_ctx_rx) {
1586 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
1587 if (!sw_ctx_rx) {
1588 rc = -ENOMEM;
1589 goto out;
1590 }
1591 ctx->priv_ctx_rx = sw_ctx_rx;
1592 } else {
1593 sw_ctx_rx =
1594 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001595 }
Dave Watson3c4d7552017-06-14 11:37:39 -07001596 }
1597
Dave Watsonc46234e2018-03-22 10:10:35 -07001598 if (tx) {
Boris Pismennyb190a582018-07-13 14:33:42 +03001599 crypto_init_wait(&sw_ctx_tx->async_wait);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02001600 crypto_info = &ctx->crypto_send.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07001601 cctx = &ctx->tx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001602 aead = &sw_ctx_tx->aead_send;
Vakul Garg9932a292018-09-24 15:35:56 +05301603 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
Vakul Garga42055e2018-09-21 09:46:13 +05301604 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
1605 sw_ctx_tx->tx_work.sk = sk;
Dave Watsonc46234e2018-03-22 10:10:35 -07001606 } else {
Boris Pismennyb190a582018-07-13 14:33:42 +03001607 crypto_init_wait(&sw_ctx_rx->async_wait);
Sabrina Dubroca86029d12018-09-12 17:44:42 +02001608 crypto_info = &ctx->crypto_recv.info;
Dave Watsonc46234e2018-03-22 10:10:35 -07001609 cctx = &ctx->rx;
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001610 aead = &sw_ctx_rx->aead_recv;
Dave Watsonc46234e2018-03-22 10:10:35 -07001611 }
1612
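	/*
	 * From here on the setup is direction-agnostic: cctx and aead point
	 * at either the tx or the rx half of the context, so the cipher
	 * parameters below are filled in the same way for both directions.
	 */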
Dave Watson3c4d7552017-06-14 11:37:39 -07001613 switch (crypto_info->cipher_type) {
1614 case TLS_CIPHER_AES_GCM_128: {
1615 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
1616 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
1617 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
1618 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
1619 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
1620 rec_seq =
1621 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
1622 gcm_128_info =
1623 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
1624 break;
1625 }
1626 default:
1627 rc = -EINVAL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01001628 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07001629 }
1630
Kees Cookb16520f2018-04-10 17:52:34 -07001631 /* Sanity-check the IV size for stack allocations. */
Kees Cook3463e512018-06-25 16:55:05 -07001632 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
Kees Cookb16520f2018-04-10 17:52:34 -07001633 rc = -EINVAL;
1634 goto free_priv;
1635 }
1636
Dave Watsonc46234e2018-03-22 10:10:35 -07001637 cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
1638 cctx->tag_size = tag_size;
1639 cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
1640 cctx->iv_size = iv_size;
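	/*
	 * Per RFC 5288 the AES-GCM nonce is the 4-byte implicit salt from
	 * the handshake followed by the 8-byte explicit IV, so lay both out
	 * contiguously in one 12-byte buffer:
	 *
	 *	cctx->iv = [ salt (4) | explicit IV (8) ]
	 */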
1641 cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
1642 GFP_KERNEL);
1643 if (!cctx->iv) {
Dave Watson3c4d7552017-06-14 11:37:39 -07001644 rc = -ENOMEM;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01001645 goto free_priv;
Dave Watson3c4d7552017-06-14 11:37:39 -07001646 }
Dave Watsonc46234e2018-03-22 10:10:35 -07001647 memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
1648 memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
1649 cctx->rec_seq_size = rec_seq_size;
zhong jiang969d5092018-08-01 00:50:24 +08001650 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
Dave Watsonc46234e2018-03-22 10:10:35 -07001651 if (!cctx->rec_seq) {
Dave Watson3c4d7552017-06-14 11:37:39 -07001652 rc = -ENOMEM;
1653 goto free_iv;
1654 }
Dave Watson3c4d7552017-06-14 11:37:39 -07001655
Dave Watsonc46234e2018-03-22 10:10:35 -07001656 if (!*aead) {
1657 *aead = crypto_alloc_aead("gcm(aes)", 0, 0);
1658 if (IS_ERR(*aead)) {
1659 rc = PTR_ERR(*aead);
1660 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07001661 goto free_rec_seq;
1662 }
1663 }
1664
1665 ctx->push_pending_record = tls_sw_push_pending_record;
1666
Sabrina Dubroca7cba09c2018-09-12 17:44:41 +02001667 rc = crypto_aead_setkey(*aead, gcm_128_info->key,
Dave Watson3c4d7552017-06-14 11:37:39 -07001668 TLS_CIPHER_AES_GCM_128_KEY_SIZE);
1669 if (rc)
1670 goto free_aead;
1671
Dave Watsonc46234e2018-03-22 10:10:35 -07001672 rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
1673 if (rc)
1674 goto free_aead;
1675
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001676 if (sw_ctx_rx) {
Dave Watsonc46234e2018-03-22 10:10:35 -07001677 /* Set up strparser */
1678 memset(&cb, 0, sizeof(cb));
1679 cb.rcv_msg = tls_queue;
1680 cb.parse_msg = tls_read_size;
1681
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001682 strp_init(&sw_ctx_rx->strp, sk, &cb);
Dave Watsonc46234e2018-03-22 10:10:35 -07001683
1684 write_lock_bh(&sk->sk_callback_lock);
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001685 sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
Dave Watsonc46234e2018-03-22 10:10:35 -07001686 sk->sk_data_ready = tls_data_ready;
1687 write_unlock_bh(&sk->sk_callback_lock);
1688
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07001689 sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;
Dave Watsonc46234e2018-03-22 10:10:35 -07001690
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001691 strp_check_rcv(&sw_ctx_rx->strp);
Dave Watsonc46234e2018-03-22 10:10:35 -07001692 }
1693
1694 goto out;
Dave Watson3c4d7552017-06-14 11:37:39 -07001695
1696free_aead:
Dave Watsonc46234e2018-03-22 10:10:35 -07001697 crypto_free_aead(*aead);
1698 *aead = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07001699free_rec_seq:
Dave Watsonc46234e2018-03-22 10:10:35 -07001700 kfree(cctx->rec_seq);
1701 cctx->rec_seq = NULL;
Dave Watson3c4d7552017-06-14 11:37:39 -07001702free_iv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001703 kfree(cctx->iv);
1704 cctx->iv = NULL;
Sabrina Dubrocacf6d43e2018-01-16 16:04:26 +01001705free_priv:
Boris Pismennyf66de3e2018-04-30 10:16:15 +03001706 if (tx) {
1707 kfree(ctx->priv_ctx_tx);
1708 ctx->priv_ctx_tx = NULL;
1709 } else {
1710 kfree(ctx->priv_ctx_rx);
1711 ctx->priv_ctx_rx = NULL;
1712 }
Dave Watson3c4d7552017-06-14 11:37:39 -07001713out:
1714 return rc;
1715}
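/*
 * Usage sketch (illustrative, not part of this file): userspace reaches
 * this function via do_tls_setsockopt_conf(), roughly as follows, with
 * the key material taken from a completed TLS 1.2 handshake:
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	... fill ci.key, ci.salt, ci.iv and ci.rec_seq from the handshake ...
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));
 */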