/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>
#include <linux/inet_diag.h>

#include <net/snmp.h>
#include <net/tls.h>
#include <net/tls_toe.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 const struct proto *base);

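/* Point sk->sk_prot at the proto variant matching the socket's address
 * family and its current TX/RX configuration (TLS_BASE/TLS_SW/TLS_HW).
 */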
void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

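/* Transmit a scatterlist of already-encrypted record data with
 * do_tcp_sendpages(). On a partial send the current position is saved in
 * ctx->partially_sent_record/offset so tls_push_partial_record() can
 * resume later; fully sent pages are released and uncharged.
 */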
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->in_tcp_sendpages = false;

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

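/* Parse SOL_TLS control messages attached to a sendmsg() call; currently
 * only TLS_SET_RECORD_TYPE is understood. Any pending open record is
 * pushed out first, since the requested type applies to a fresh record.
 *
 * Userspace side, as a minimal sketch ("fd" is a hypothetical connected
 * kTLS socket, error handling and the data iovec omitted):
 *
 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	unsigned char record_type = 21;		// e.g. a TLS alert record
 *	struct msghdr msg = { 0 };
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(record_type));
 *	*CMSG_DATA(cmsg) = record_type;
 *	sendmsg(fd, &msg, 0);
 */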
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

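/* Resume transmission of a record that was cut short by an earlier
 * tls_push_sg() call; the saved scatterlist position is consumed here.
 */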
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

void tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
{
	struct scatterlist *sg;

	for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) {
		put_page(sg_page(sg));
		sk_mem_uncharge(sk, sg->length);
	}
	ctx->partially_sent_record = NULL;
}

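/* sk->sk_write_space replacement installed for the TX path: routes the
 * wakeup to the SW or device TX code before chaining to the original
 * handler.
 */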
static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If in_tcp_sendpages, call the lower protocol write space handler
	 * to ensure we wake up any waiting operations there, for example
	 * if do_tcp_sendpages were to call sk_wait_event.
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		tls_device_write_space(sk, ctx);
	else
#endif
		tls_sw_write_space(sk, ctx);

	ctx->sk_write_space(sk);
}

/**
 * tls_ctx_free() - free TLS ULP context
 * @sk: socket to which @ctx is attached
 * @ctx: TLS context structure
 *
 * Free TLS context. If @sk is %NULL caller guarantees that the socket
 * to which @ctx was attached has no outstanding references.
 */
void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	mutex_destroy(&ctx->tx_lock);

	if (sk)
		kfree_rcu(ctx, rcu);
	else
		kfree(ctx);
}

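/* Release TX/RX crypto resources according to the active configuration
 * and drop the corresponding LINUX_MIB_TLSCURR* counters; waits briefly
 * for a pending writer so an open record can still be flushed.
 */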
static void tls_sk_proto_cleanup(struct sock *sk,
				 struct tls_context *ctx, long timeo)
{
	if (unlikely(sk->sk_write_pending) &&
	    !wait_on_pending_writer(sk, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_release_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
	} else if (ctx->tx_conf == TLS_HW) {
		tls_device_free_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
	}

	if (ctx->rx_conf == TLS_SW) {
		tls_sw_release_resources_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
	} else if (ctx->rx_conf == TLS_HW) {
		tls_device_offload_cleanup_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
	}
}

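/* close() replacement: tear down TLS state, restore the original proto
 * and write_space callbacks under sk_callback_lock, then hand off to the
 * base protocol's close.
 */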
static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	bool free_ctx;

	if (ctx->tx_conf == TLS_SW)
		tls_sw_cancel_work_tx(ctx);

	lock_sock(sk);
	free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;

	if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
		tls_sk_proto_cleanup(sk, ctx, timeo);

	write_lock_bh(&sk->sk_callback_lock);
	if (free_ctx)
		rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	sk->sk_prot = ctx->sk_proto;
	if (sk->sk_write_space == tls_write_space)
		sk->sk_write_space = ctx->sk_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);
	if (ctx->tx_conf == TLS_SW)
		tls_sw_free_ctx_tx(ctx);
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
		tls_sw_strparser_done(ctx);
	if (ctx->rx_conf == TLS_SW)
		tls_sw_free_ctx_rx(ctx);
	ctx->sk_proto->close(sk, timeout);

	if (free_ctx)
		tls_ctx_free(sk, ctx);
}

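/* Copy the TX crypto parameters back to userspace. With only the base
 * tls_crypto_info-sized buffer just version/cipher are returned; with a
 * full per-cipher struct the current IV and record sequence are included.
 */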
static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send.info;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *
		  crypto_info_aes_gcm_256 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_256,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_256->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
		memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_256,
				 sizeof(*crypto_info_aes_gcm_256)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

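/* Configure one direction (TX or RX) from a tls12_crypto_info_* blob:
 * validate that version and cipher match the other direction if it is
 * already set up, then try device offload first and fall back to the
 * software implementation.
 *
 * A minimal userspace sketch of enabling kTLS TX ("fd" is a hypothetical
 * connected TCP socket; key material comes from the TLS handshake, error
 * handling omitted):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	// fill ci.key, ci.salt, ci.iv and ci.rec_seq here
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 */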
static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_crypto_info *alt_crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	size_t optsize;
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		alt_crypto_info = &ctx->crypto_recv.info;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		alt_crypto_info = &ctx->crypto_send.info;
	}

	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION &&
	    crypto_info->version != TLS_1_3_VERSION) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	/* Ensure that TLS version and ciphers are same in both directions */
	if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
		if (alt_crypto_info->version != crypto_info->version ||
		    alt_crypto_info->cipher_type != crypto_info->cipher_type) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_128);
		break;
	case TLS_CIPHER_AES_GCM_256:
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_256);
		break;
	case TLS_CIPHER_AES_CCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_ccm_128);
		break;
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (optlen != optsize) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
			    optlen - sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	if (tx) {
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
		} else {
			rc = tls_set_sw_offload(sk, ctx, 1);
			if (rc)
				goto err_crypto_info;
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
			conf = TLS_SW;
		}
	} else {
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
		} else {
			rc = tls_set_sw_offload(sk, ctx, 0);
			if (rc)
				goto err_crypto_info;
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
			conf = TLS_SW;
		}
		tls_sw_strparser_arm(sk, ctx);
	}

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level, optname, optval,
						 optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

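/* Allocate a zeroed TLS context (GFP_ATOMIC, since the caller may hold
 * sk_callback_lock), publish it on icsk_ulp_data under RCU and remember
 * the base sk_prot so it can be restored on close.
 */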
struct tls_context *tls_ctx_create(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	mutex_init(&ctx->tx_lock);
	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	ctx->sk_proto = sk->sk_prot;
	return ctx;
}

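/* Lazily (re)build the per-family proto tables the first time a TLS
 * socket of that family appears, and again if the underlying TCP proto
 * address ever changes; double-checked under the per-family mutex.
 */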
static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], sk->sk_prot);
			smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}

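/* Fill the [tx_conf][rx_conf] matrix: every variant starts from the base
 * TCP proto, with sendmsg/sendpage overridden per TX config and
 * recvmsg/stream_memory_read per RX config.
 */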
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 const struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt	= tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt	= tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close		= tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg		= tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage		= tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg		  = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_BASE][TLS_SW].close		  = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg		= tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].stream_memory_read	= tls_sw_stream_read;
	prot[TLS_SW][TLS_SW].close		= tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg		= tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage		= tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg		= tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage		= tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif
#ifdef CONFIG_TLS_TOE
	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash		= tls_toe_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash	= tls_toe_unhash;
#endif
}

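/* ULP init hook for setsockopt(TCP_ULP, "tls"): attaches a fresh context
 * in TLS_BASE/TLS_BASE mode; actual crypto state arrives later via
 * setsockopt(SOL_TLS, TLS_TX/TLS_RX).
 */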
static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	tls_build_proto(sk);

#ifdef CONFIG_TLS_TOE
	if (tls_toe_bypass(sk))
		return 0;
#endif

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* allocate tls context */
	write_lock_bh(&sk->sk_callback_lock);
	ctx = tls_ctx_create(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	write_unlock_bh(&sk->sk_callback_lock);
	return rc;
}

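/* ULP update hook, invoked when another layer (e.g. BPF sockmap) swaps
 * the socket's proto, so teardown later restores the right callbacks.
 */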
static void tls_update(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk))
{
	struct tls_context *ctx;

	ctx = tls_get_ctx(sk);
	if (likely(ctx)) {
		ctx->sk_write_space = write_space;
		ctx->sk_proto = p;
	} else {
		/* Pairs with lockless read in sk_clone_lock(). */
		WRITE_ONCE(sk->sk_prot, p);
		sk->sk_write_space = write_space;
	}
}

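/* inet_diag callback: report version, cipher and TX/RX configuration as
 * nested INET_ULP_INFO_TLS attributes for socket diag consumers.
 */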
static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
{
	u16 version, cipher_type;
	struct tls_context *ctx;
	struct nlattr *start;
	int err;

	start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS);
	if (!start)
		return -EMSGSIZE;

	rcu_read_lock();
	ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
	if (!ctx) {
		err = 0;
		goto nla_failure;
	}
	version = ctx->prot_info.version;
	if (version) {
		err = nla_put_u16(skb, TLS_INFO_VERSION, version);
		if (err)
			goto nla_failure;
	}
	cipher_type = ctx->prot_info.cipher_type;
	if (cipher_type) {
		err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type);
		if (err)
			goto nla_failure;
	}
	err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true));
	if (err)
		goto nla_failure;

	err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false));
	if (err)
		goto nla_failure;

	rcu_read_unlock();
	nla_nest_end(skb, start);
	return 0;

nla_failure:
	rcu_read_unlock();
	nla_nest_cancel(skb, start);
	return err;
}

static size_t tls_get_info_size(const struct sock *sk)
{
	size_t size = 0;

	size += nla_total_size(0) +		/* INET_ULP_INFO_TLS */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_VERSION */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_CIPHER */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_RXCONF */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_TXCONF */
		0;

	return size;
}

static int __net_init tls_init_net(struct net *net)
{
	int err;

	net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
	if (!net->mib.tls_statistics)
		return -ENOMEM;

	err = tls_proc_init(net);
	if (err)
		goto err_free_stats;

	return 0;
err_free_stats:
	free_percpu(net->mib.tls_statistics);
	return err;
}

static void __net_exit tls_exit_net(struct net *net)
{
	tls_proc_fini(net);
	free_percpu(net->mib.tls_statistics);
}

static struct pernet_operations tls_proc_ops = {
	.init = tls_init_net,
	.exit = tls_exit_net,
};

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name			= "tls",
	.owner			= THIS_MODULE,
	.init			= tls_init,
	.update			= tls_update,
	.get_info		= tls_get_info,
	.get_info_size		= tls_get_info_size,
};

static int __init tls_register(void)
{
	int err;

	err = register_pernet_subsys(&tls_proc_ops);
	if (err)
		return err;

	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;
	tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked;

	tls_device_init();
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
	tls_device_cleanup();
	unregister_pernet_subsys(&tls_proc_ops);
}

module_init(tls_register);
module_exit(tls_unregister);