// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"

#define MPTCP_SAME_STATE TCP_MAX_STATES

/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || mptcp_subflow_ctx(msk->subflow->sk)->fourth_ack)
		return NULL;

	return msk->subflow;
}

/* If msk has a single subflow, and the MP_CAPABLE handshake has failed,
 * return the subflow socket.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_tcp_fallback(const struct mptcp_sock *msk)
{
	struct socket *ssock = __mptcp_nmpc_socket(msk);

	sock_owned_by_me((const struct sock *)msk);

	if (!ssock || sk_is_mptcp(ssock->sk))
		return NULL;

	return ssock;
}

static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk)
{
	return ((struct sock *)msk)->sk_state == TCP_CLOSE;
}

static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	ssock = __mptcp_nmpc_socket(msk);
	if (ssock)
		goto set_state;

	if (!__mptcp_can_create_subflow(msk))
		return ERR_PTR(-EINVAL);

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
		return ERR_PTR(err);

	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	subflow->request_mptcp = 1;

set_state:
	if (state != MPTCP_SAME_STATE)
		inet_sk_state_store(sk, state);
	return ssock;
}

static struct sock *mptcp_subflow_get(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	sock_owned_by_me((const struct sock *)msk);

	mptcp_for_each_subflow(msk, subflow) {
		return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

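/* Keep one pre-allocated skb extension around per msk, so that
 * mptcp_sendmsg_frag() is guaranteed to be able to attach a DSS mapping
 * once the payload has been pushed to the subflow.
 */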
static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
{
	if (!msk->cached_ext)
		msk->cached_ext = __skb_ext_alloc();

	return !!msk->cached_ext;
}

static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct msghdr *msg, long *timeo)
{
	int mss_now = 0, size_goal = 0, ret = 0;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_ext *mpext = NULL;
	struct page_frag *pfrag;
	struct sk_buff *skb;
	size_t psize;

	/* use the mptcp page cache so that we can easily move the data
	 * from one substream to another, but do per subflow memory accounting
	 */
	pfrag = sk_page_frag(sk);
	while (!sk_page_frag_refill(ssk, pfrag) ||
	       !mptcp_ext_cache_refill(msk)) {
		ret = sk_stream_wait_memory(ssk, timeo);
		if (ret)
			return ret;
	}

	/* compute copy limit */
	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
	psize = min_t(int, pfrag->size - pfrag->offset, size_goal);

	pr_debug("left=%zu", msg_data_left(msg));
	psize = copy_page_from_iter(pfrag->page, pfrag->offset,
				    min_t(size_t, msg_data_left(msg), psize),
				    &msg->msg_iter);
	pr_debug("left=%zu", msg_data_left(msg));
	if (!psize)
		return -EINVAL;

	/* Mark the end of the previous write so the beginning of the
	 * next write (with its own mptcp skb extension data) is not
	 * collapsed.
	 */
	skb = tcp_write_queue_tail(ssk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	ret = do_tcp_sendpages(ssk, pfrag->page, pfrag->offset, psize,
			       msg->msg_flags | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;
	if (unlikely(ret < psize))
		iov_iter_revert(&msg->msg_iter, psize - ret);

	skb = tcp_write_queue_tail(ssk);
	mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
	msk->cached_ext = NULL;

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = msk->write_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = ret;
	mpext->use_map = 1;
	mpext->dsn64 = 1;
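	/* Illustrative example of the bookkeeping above: with
	 * msk->write_seq == 1001 and rel_write_seq == 1 on this subflow,
	 * a 500 byte send yields the mapping data_seq=1001, subflow_seq=1,
	 * data_len=500; both counters are then advanced by 500 below.
	 */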

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

	pfrag->offset += ret;
	msk->write_seq += ret;
	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;

	tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle, size_goal);
	return ret;
}

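/* When the subflow socket is no longer writeable, clear MPTCP_SEND_SPACE
 * and raise SOCK_NOSPACE on the owning socket; the write-space callback
 * on the subflow side is then expected to wake the MPTCP-level writer
 * once room becomes available again.
 */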
static void ssk_check_wmem(struct mptcp_sock *msk, struct sock *ssk)
{
	struct socket *sock;

	if (likely(sk_stream_is_writeable(ssk)))
		return;

	sock = READ_ONCE(ssk->sk_socket);

	if (sock) {
		clear_bit(MPTCP_SEND_SPACE, &msk->flags);
		smp_mb__after_atomic();
		/* set NOSPACE only after clearing SEND_SPACE flag */
		set_bit(SOCK_NOSPACE, &sock->flags);
	}
}

static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;
	size_t copied = 0;
	struct sock *ssk;
	int ret = 0;
	long timeo;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (ssock) {
		pr_debug("fallback passthrough");
		ret = sock_sendmsg(ssock, msg);
		release_sock(sk);
		return ret;
	}

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	ssk = mptcp_subflow_get(msk);
	if (!ssk) {
		release_sock(sk);
		return -ENOTCONN;
	}

	pr_debug("conn_list->subflow=%p", ssk);

	lock_sock(ssk);
	while (msg_data_left(msg)) {
		ret = mptcp_sendmsg_frag(sk, ssk, msg, &timeo);
		if (ret < 0)
			break;

		copied += ret;
	}

	if (copied > 0)
		ret = copied;

	ssk_check_wmem(msk, ssk);
	release_sock(ssk);
	release_sock(sk);
	return ret;
}

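/* Read actor for tcp_read_sock(): copies up to desc->count bytes from the
 * skb into arg->msg, or just flushes the payload when no msghdr is given.
 *
 * Usage sketch (illustrative; assumes struct mptcp_read_arg as declared
 * in protocol.h):
 *
 *	struct mptcp_read_arg arg = { .msg = msg };
 *	read_descriptor_t desc = { .arg.data = &arg, .count = len };
 *
 *	ret = tcp_read_sock(ssk, &desc, mptcp_read_actor);
 */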
int mptcp_read_actor(read_descriptor_t *desc, struct sk_buff *skb,
		     unsigned int offset, size_t len)
{
	struct mptcp_read_arg *arg = desc->arg.data;
	size_t copy_len;

	copy_len = min(desc->count, len);

	if (likely(arg->msg)) {
		int err;

		err = skb_copy_datagram_msg(skb, offset, arg->msg, copy_len);
		if (err) {
			pr_debug("error path");
			desc->error = err;
			return err;
		}
	} else {
		pr_debug("Flushing skb payload");
	}

	desc->count -= copy_len;

	pr_debug("consumed %zu bytes, %zu left", copy_len, desc->count);
	return copy_len;
}

static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int nonblock, int flags, int *addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;
	struct sock *ssk;
	int copied = 0;

	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
		return -EOPNOTSUPP;

	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (ssock) {
		pr_debug("fallback-read subflow=%p",
			 mptcp_subflow_ctx(ssock->sk));
		copied = sock_recvmsg(ssock, msg, flags);
		release_sock(sk);
		return copied;
	}

	ssk = mptcp_subflow_get(msk);
	if (!ssk) {
		release_sock(sk);
		return -ENOTCONN;
	}

	copied = sock_recvmsg(ssk->sk_socket, msg, flags);

	release_sock(sk);

	return copied;
}

/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
			      struct mptcp_subflow_context *subflow,
			      long timeout)
{
	struct socket *sock = READ_ONCE(ssk->sk_socket);

	list_del(&subflow->node);

	if (sock && sock != sk->sk_socket) {
		/* outgoing subflow */
		sock_release(sock);
	} else {
		/* incoming subflow */
		tcp_close(ssk, timeout);
	}
}

static int mptcp_init_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	INIT_LIST_HEAD(&msk->conn_list);
	__set_bit(MPTCP_SEND_SPACE, &msk->flags);

	return 0;
}

static void mptcp_subflow_shutdown(struct sock *ssk, int how)
{
	lock_sock(ssk);

	switch (ssk->sk_state) {
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		/* fall through */
	case TCP_SYN_SENT:
		tcp_disconnect(ssk, O_NONBLOCK);
		break;
	default:
		ssk->sk_shutdown |= how;
		tcp_shutdown(ssk, how);
		break;
	}

	/* Wake up anyone sleeping in poll. */
	ssk->sk_state_change(ssk);
	release_sock(ssk);
}

static void mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_token_destroy(msk->token);
	inet_sk_state_store(sk, TCP_CLOSE);

	lock_sock(sk);

	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		__mptcp_close_ssk(sk, ssk, subflow, timeout);
	}

	if (msk->cached_ext)
		__skb_ext_put(msk->cached_ext);
	release_sock(sk);
	sk_common_release(sk);
}

static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
	struct ipv6_pinfo *msk6 = inet6_sk(msk);

	msk->sk_v6_daddr = ssk->sk_v6_daddr;
	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

	if (msk6 && ssk6) {
		msk6->saddr = ssk6->saddr;
		msk6->flow_label = ssk6->flow_label;
	}
#endif

	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}

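/* Accept on the first subflow (the in-kernel listener socket). If the peer
 * completed the MP_CAPABLE handshake, clone the listening MPTCP socket into
 * a new msk, move keys/token over from the subflow context and attach the
 * freshly accepted TCP sock as its first subflow; otherwise the plain TCP
 * sock is returned as-is.
 */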
static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
				 bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *listener;
	struct sock *newsk;

	listener = __mptcp_nmpc_socket(msk);
	if (WARN_ON_ONCE(!listener)) {
		*err = -EINVAL;
		return NULL;
	}

	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
	newsk = inet_csk_accept(listener->sk, flags, err, kern);
	if (!newsk)
		return NULL;

	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));

	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;
		struct sock *ssk = newsk;
		u64 ack_seq;

		subflow = mptcp_subflow_ctx(newsk);
		lock_sock(sk);

		local_bh_disable();
		new_mptcp_sock = sk_clone_lock(sk, GFP_ATOMIC);
		if (!new_mptcp_sock) {
			*err = -ENOBUFS;
			local_bh_enable();
			release_sock(sk);
			mptcp_subflow_shutdown(newsk, SHUT_RDWR + 1);
			tcp_close(newsk, 0);
			return NULL;
		}

		mptcp_init_sock(new_mptcp_sock);

		msk = mptcp_sk(new_mptcp_sock);
		msk->remote_key = subflow->remote_key;
		msk->local_key = subflow->local_key;
		msk->token = subflow->token;
		msk->subflow = NULL;

		mptcp_token_update_accept(newsk, new_mptcp_sock);

		mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
		msk->write_seq = subflow->idsn + 1;
		ack_seq++;
		msk->ack_seq = ack_seq;
		subflow->map_seq = ack_seq;
		subflow->map_subflow_seq = 1;
		subflow->rel_write_seq = 1;
		subflow->tcp_sock = ssk;
		newsk = new_mptcp_sock;
		mptcp_copy_inaddrs(newsk, ssk);
		list_add(&subflow->node, &msk->conn_list);

		/* will be fully established at mptcp_stream_accept()
		 * completion.
		 */
		inet_sk_state_store(new_mptcp_sock, TCP_SYN_RECV);
		bh_unlock_sock(new_mptcp_sock);
		local_bh_enable();
		release_sock(sk);
	}

	return newsk;
}

static void mptcp_destroy(struct sock *sk)
{
}

static int mptcp_setsockopt(struct sock *sk, int level, int optname,
			    char __user *uoptval, unsigned int optlen)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	char __kernel *optval;
	int ret = -EOPNOTSUPP;
	struct socket *ssock;

	/* will be treated as __user in tcp_setsockopt */
	optval = (char __kernel __force *)uoptval;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of setsockopt() when the socket is connected and
	 * there are multiple subflows is not defined.
	 */
	lock_sock(sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (!IS_ERR(ssock)) {
		pr_debug("subflow=%p", ssock->sk);
		ret = kernel_setsockopt(ssock, level, optname, optval, optlen);
	}
	release_sock(sk);

	return ret;
}

static int mptcp_getsockopt(struct sock *sk, int level, int optname,
			    char __user *uoptval, int __user *uoption)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	char __kernel *optval;
	int ret = -EOPNOTSUPP;
	int __kernel *option;
	struct socket *ssock;

	/* will be treated as __user in tcp_getsockopt */
	optval = (char __kernel __force *)uoptval;
	option = (int __kernel __force *)uoption;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of getsockopt() when the socket is connected and
	 * there are multiple subflows is not defined.
	 */
	lock_sock(sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (!IS_ERR(ssock)) {
		pr_debug("subflow=%p", ssock->sk);
		ret = kernel_getsockopt(ssock, level, optname, optval, option);
	}
	release_sock(sk);

	return ret;
}

static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	ssock = __mptcp_nmpc_socket(msk);
	pr_debug("msk=%p, subflow=%p", msk, ssock);
	if (WARN_ON_ONCE(!ssock))
		return -EINVAL;

	return inet_csk_get_port(ssock->sk, snum);
}

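/* Called once the MP_CAPABLE handshake completed on an outgoing connection:
 * propagate keys and token from the subflow context to the msk and seed the
 * data sequence space. write_seq starts at the local IDSN + 1, while
 * ack_seq is derived from a hash of the remote key via
 * mptcp_crypto_key_sha().
 */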
void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;
	u64 ack_seq;

	subflow = mptcp_subflow_ctx(ssk);

	if (!subflow->mp_capable)
		return;

	sk = subflow->conn;
	msk = mptcp_sk(sk);

	pr_debug("msk=%p, token=%u", sk, subflow->token);

	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
	ack_seq++;
	subflow->map_seq = ack_seq;
	subflow->map_subflow_seq = 1;
	subflow->rel_write_seq = 1;

	/* The socket is not connected yet; no msk/subflow ops can race with
	 * the writes below.
	 */
	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->token, subflow->token);
	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
	WRITE_ONCE(msk->ack_seq, ack_seq);
}

static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}

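/* ->stream_memory_free hook: report the MPTCP socket as writeable only
 * while MPTCP_SEND_SPACE is set. Callers that are not going to wait for
 * a wakeup (wake == 0) always see free space, as the actual accounting
 * happens on the subflows.
 */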
static bool mptcp_memory_free(const struct sock *sk, int wake)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true;
}

static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.close		= mptcp_close,
	.accept		= mptcp_accept,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= tcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.recvmsg	= mptcp_recvmsg,
	.hash		= inet_hash,
	.unhash		= inet_unhash,
	.get_port	= mptcp_get_port,
	.stream_memory_free	= mptcp_memory_free,
	.obj_size	= sizeof(struct mptcp_sock),
	.no_autobind	= true,
};
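
/* For reference, an application selects this proto with the IPPROTO_MPTCP
 * protocol number (illustrative snippet, error handling omitted):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * Everything else behaves like a regular TCP stream socket.
 */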

static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

	err = ssock->ops->bind(ssock, uaddr, addr_len);
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
				int addr_len, int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, TCP_SYN_SENT);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
		mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0;
#endif

	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_v4_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcp_prot) {
		/* we are being invoked from __sys_accept4, after
		 * mptcp_accept() has just accepted a non-mp-capable
		 * flow: sk is a tcp_sk, not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
	}

	return inet_getname(sock, uaddr, peer);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int mptcp_v6_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcpv6_prot) {
		/* we are being invoked from __sys_accept4 after
		 * mptcp_accept() has accepted a non-mp-capable
		 * subflow: sk is a tcp_sk, not mptcp.
		 *
		 * Hand the socket over to tcp so all further
		 * socket ops bypass mptcp.
		 */
		sock->ops = &inet6_stream_ops;
	}

	return inet6_getname(sock, uaddr, peer);
}
#endif

static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, TCP_LISTEN);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

	err = ssock->ops->listen(ssock, backlog);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static bool is_tcp_proto(const struct proto *p)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	return p == &tcp_prot || p == &tcpv6_prot;
#else
	return p == &tcp_prot;
#endif
}

static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	if (sock->sk->sk_state != TCP_LISTEN)
		goto unlock_fail;

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock)
		goto unlock_fail;

	sock_hold(ssock->sk);
	release_sock(sock->sk);

	err = ssock->ops->accept(sock, newsock, flags, kern);
	if (err == 0 && !is_tcp_proto(newsock->sk->sk_prot)) {
		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
		struct mptcp_subflow_context *subflow;

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		list_for_each_entry(subflow, &msk->conn_list, node) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}

		inet_sk_state_store(newsock->sk, TCP_ESTABLISHED);
	}

	sock_put(ssock->sk);
	return err;

unlock_fail:
	release_sock(sock->sk);
	return -EINVAL;
}

static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	const struct mptcp_sock *msk;
	struct sock *sk = sock->sk;
	struct socket *ssock;
	__poll_t mask = 0;

	msk = mptcp_sk(sk);
	lock_sock(sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (ssock) {
		mask = ssock->ops->poll(file, ssock, wait);
		release_sock(sk);
		return mask;
	}

	release_sock(sk);
	sock_poll_wait(file, sock, wait);
	lock_sock(sk);

	if (test_bit(MPTCP_DATA_READY, &msk->flags))
		mask = EPOLLIN | EPOLLRDNORM;
	if (sk_stream_is_writeable(sk) &&
	    test_bit(MPTCP_SEND_SPACE, &msk->flags))
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	release_sock(sk);

	return mask;
}

static int mptcp_shutdown(struct socket *sock, int how)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	int ret = 0;

	pr_debug("sk=%p, how=%d", msk, how);

	lock_sock(sock->sk);

	if (how == SHUT_WR || how == SHUT_RDWR)
		inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);

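	/* Map SHUT_RD (0), SHUT_WR (1) and SHUT_RDWR (2) onto the
	 * RCV_SHUTDOWN/SEND_SHUTDOWN bit mask (1, 2 and 3), the same
	 * trick inet_shutdown() uses.
	 */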
	how++;

	if ((how & ~SHUTDOWN_MASK) || !how) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (sock->state == SS_CONNECTING) {
		if ((1 << sock->sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

		mptcp_subflow_shutdown(tcp_sk, how);
	}

out_unlock:
	release_sock(sock->sk);

	return ret;
}

static struct proto_ops mptcp_stream_ops;

static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

void __init mptcp_init(void)
{
	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
	mptcp_stream_ops = inet_stream_ops;
	mptcp_stream_ops.bind = mptcp_bind;
	mptcp_stream_ops.connect = mptcp_stream_connect;
	mptcp_stream_ops.poll = mptcp_poll;
	mptcp_stream_ops.accept = mptcp_stream_accept;
	mptcp_stream_ops.getname = mptcp_v4_getname;
	mptcp_stream_ops.listen = mptcp_listen;
	mptcp_stream_ops.shutdown = mptcp_shutdown;

	mptcp_subflow_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct proto_ops mptcp_v6_stream_ops;
static struct proto mptcp_v6_prot;

static void mptcp_v6_destroy(struct sock *sk)
{
	mptcp_destroy(sk);
	inet6_destroy_sock(sk);
}

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

int mptcpv6_init(void)
{
	int err;

	mptcp_v6_prot = mptcp_prot;
	strcpy(mptcp_v6_prot.name, "MPTCPv6");
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.destroy = mptcp_v6_destroy;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp_sock) +
				 sizeof(struct ipv6_pinfo);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	mptcp_v6_stream_ops = inet6_stream_ops;
	mptcp_v6_stream_ops.bind = mptcp_bind;
	mptcp_v6_stream_ops.connect = mptcp_stream_connect;
	mptcp_v6_stream_ops.poll = mptcp_poll;
	mptcp_v6_stream_ops.accept = mptcp_stream_accept;
	mptcp_v6_stream_ops.getname = mptcp_v6_getname;
	mptcp_v6_stream_ops.listen = mptcp_listen;
	mptcp_v6_stream_ops.shutdown = mptcp_shutdown;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif