/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>

#include <net/tls.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

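/* Global ULP state: the saved IPv6 base proto (the TLSV6 protos are rebuilt
 * lazily, see tls_init()), the registry of TLS_HW_RECORD-capable devices,
 * and one struct proto per {IP version, TX config, RX config} combination,
 * filled in by build_protos().
 */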
static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;

static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}

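/* Wait until a previous writer has drained sk->sk_write_pending, honouring
 * the send timeout and pending signals.
 */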
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

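/* Push an already-encrypted record, described by a scatterlist, into the
 * TCP stream via do_tcp_sendpages(). On a partial send, the remaining
 * scatterlist and offset are parked in the context so that
 * tls_push_partial_record() can resume later.
 */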
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->in_tcp_sendpages = false;
	ctx->sk_write_space(sk);

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

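/* Parse the SOL_TLS control message that lets a sender pick a non-data TLS
 * record type (alert, handshake, ...). Userspace drives this roughly as in
 * the sketch below (illustrative only; buf, fd and record_type are the
 * caller's):
 *
 *	char buf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct msghdr msg = { 0 };
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_control = buf;
 *	msg.msg_controllen = sizeof(buf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned char));
 *	*CMSG_DATA(cmsg) = record_type;
 *
 * then attach the payload iovec and call sendmsg(fd, &msg, 0).
 */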
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

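/* Resume transmission of a record that tls_push_sg() could only partially
 * hand to TCP.
 */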
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

int tls_push_pending_closed_record(struct sock *sk,
				   struct tls_context *tls_ctx,
				   int flags, long *timeo)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	if (tls_is_partially_sent_record(tls_ctx) ||
	    !list_empty(&ctx->tx_list))
		return tls_tx_records(sk, flags);
	else
		return tls_ctx->push_pending_record(sk, flags);
}

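/* sk->sk_write_space replacement: kick the asynchronous TX worker when
 * socket memory frees up, then chain to the transport's original handler.
 */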
static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* If in_tcp_sendpages is set, call the lower protocol's write space
	 * handler to ensure we wake up any operations waiting there, e.g. if
	 * do_tcp_sendpages() were to call sk_wait_event().
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

	/* Schedule the transmission if the tx list is ready */
	if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
		if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
			schedule_delayed_work(&tx_ctx->tx_work.work, 0);
	}

	ctx->sk_write_space(sk);
}

static void tls_ctx_free(struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	kfree(ctx);
}

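/* close() replacement installed by the ULP: flush any open record, release
 * the SW/HW crypto state for each direction that was configured, then hand
 * off to the transport's original close.
 */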
static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	void (*sk_proto_close)(struct sock *sk, long timeout);
	bool free_ctx = false;

	lock_sock(sk);
	sk_proto_close = ctx->sk_proto_close;

	if ((ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD) ||
	    (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE)) {
		free_ctx = true;
		goto skip_tx_cleanup;
	}

	if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_free_resources_tx(sk);
	}

	if (ctx->rx_conf == TLS_SW) {
		kfree(ctx->rx.rec_seq);
		kfree(ctx->rx.iv);
		tls_sw_free_resources_rx(sk);
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->rx_conf == TLS_HW)
		tls_device_offload_cleanup_rx(sk);

	if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
#else
	{
#endif
		tls_ctx_free(ctx);
		ctx = NULL;
	}

skip_tx_cleanup:
	release_sock(sk);
	sk_proto_close(sk, timeout);
	/* free ctx for TLS_HW_RECORD, used by tcp_set_state
	 * for sk->sk_prot->unhash [tls_hw_unhash]
	 */
	if (free_ctx)
		tls_ctx_free(ctx);
}

static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send.info;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

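/* Install crypto state for one direction (TX or RX), preferring device
 * offload and falling back to the software implementation. After the TLS
 * handshake, userspace enables kTLS roughly as in this sketch (illustrative
 * only; fd is the caller's connected TCP socket and the key material comes
 * from the negotiated session):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *
 *	fill ci.iv, ci.rec_seq, ci.key and ci.salt from the handshake, then:
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 */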
static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx)
		crypto_info = &ctx->crypto_send.info;
	else
		crypto_info = &ctx->crypto_recv.info;

	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
				    optlen - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto err_crypto_info;
		}
		break;
	}
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (tx) {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 1);
			conf = TLS_SW;
		}
	} else {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 0);
			conf = TLS_SW;
		}
	}

	if (rc)
		goto err_crypto_info;

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

static struct tls_context *create_ctx(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	icsk->icsk_ulp_data = ctx;
	ctx->setsockopt = sk->sk_prot->setsockopt;
	ctx->getsockopt = sk->sk_prot->getsockopt;
	ctx->sk_proto_close = sk->sk_prot->close;
	return ctx;
}

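/* Give registered TLS_HW_RECORD devices (full record-layer offload) a
 * chance to claim the socket before the generic kTLS paths are set up.
 * Returns 1 if a device took ownership.
 */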
static int tls_hw_prot(struct sock *sk)
{
	struct tls_context *ctx;
	struct tls_device *dev;
	int rc = 0;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->feature && dev->feature(dev)) {
			ctx = create_ctx(sk);
			if (!ctx)
				goto out;

			ctx->hash = sk->sk_prot->hash;
			ctx->unhash = sk->sk_prot->unhash;
			ctx->sk_proto_close = sk->sk_prot->close;
			ctx->rx_conf = TLS_HW_RECORD;
			ctx->tx_conf = TLS_HW_RECORD;
			update_sk_prot(sk, ctx);
			rc = 1;
			break;
		}
	}
out:
	spin_unlock_bh(&device_spinlock);
	return rc;
}

static void tls_hw_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->unhash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			dev->unhash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);
	ctx->unhash(sk);
}

static int tls_hw_hash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;
	int err;

	err = ctx->hash(sk);
	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->hash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			err |= dev->hash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);

	if (err)
		tls_hw_unhash(sk);
	return err;
}

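/* Derive the full proto matrix from the transport's base proto: the first
 * index is the TX configuration, the second the RX configuration, and each
 * variant overrides only the callbacks that configuration needs.
 */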
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif

	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
}

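/* ULP init hook: runs when userspace does
 * setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")) on an established
 * TCP socket. It only swaps in the TLS_BASE protos here; the crypto state
 * is installed later via the TLS_TX/TLS_RX setsockopts.
 */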
static int tls_init(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
	struct tls_context *ctx;
	int rc = 0;

	if (tls_hw_prot(sk))
		goto out;

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	ctx = create_ctx(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	/* Rebuild the IPv6 TLS protos whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	return rc;
}

void tls_register_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_add_tail(&device->dev_list, &device_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_register_device);

void tls_unregister_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_del(&device->dev_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_unregister_device);

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name = "tls",
	.owner = THIS_MODULE,
	.init = tls_init,
};

static int __init tls_register(void)
{
	build_protos(tls_prots[TLSV4], &tcp_prot);

	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
	tls_device_init();
#endif
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
#ifdef CONFIG_TLS_DEVICE
	tls_device_cleanup();
#endif
}

module_init(tls_register);
module_exit(tls_unregister);