// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"

#define MPTCP_SAME_STATE TCP_MAX_STATES

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || READ_ONCE(msk->can_ack))
		return NULL;

	return msk->subflow;
}

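/* Return true when this connection must fall back to plain TCP: the initial
 * subflow exists but the handshake ended without MPTCP support, so all
 * further I/O has to go through the first subflow directly.
 */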
static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk)
{
	return msk->first && !sk_is_mptcp(msk->first);
}

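/* Return the subflow socket to be used for plain-TCP passthrough, or NULL
 * when no fallback is needed. Note: in the fallback case this releases the
 * msk-level socket lock, so the caller must not touch msk state afterwards.
 */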
static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
	sock_owned_by_me((const struct sock *)msk);

	if (likely(!__mptcp_needs_tcp_fallback(msk)))
		return NULL;

	if (msk->subflow) {
		release_sock((struct sock *)msk);
		return msk->subflow;
	}

	return NULL;
}

static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk)
{
	return !msk->first;
}

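/* Return the initial subflow socket, creating it on first use. The new
 * subflow is linked on the conn_list and flagged to request MP_CAPABLE;
 * the msk is optionally moved to @state (MPTCP_SAME_STATE leaves it as-is).
 */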
static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	ssock = __mptcp_nmpc_socket(msk);
	if (ssock)
		goto set_state;

	if (!__mptcp_can_create_subflow(msk))
		return ERR_PTR(-EINVAL);

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
		return ERR_PTR(err);

	msk->first = ssock->sk;
	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	subflow->request_mptcp = 1;

set_state:
	if (state != MPTCP_SAME_STATE)
		inet_sk_state_store(sk, state);
	return ssock;
}

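/* Minimal egress "scheduler": always pick the first subflow on the
 * connection list. This effectively pins all transmitted data to a single
 * subflow, presumably until a real packet scheduler is added.
 */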
static struct sock *mptcp_subflow_get(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	sock_owned_by_me((const struct sock *)msk);

	mptcp_for_each_subflow(msk, subflow) {
		return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

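/* Keep one pre-allocated skb extension cached on the msk, so the transmit
 * path cannot fail on extension allocation after data has already been
 * pushed to the subflow.
 */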
static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
{
	if (!msk->cached_ext)
		msk->cached_ext = __skb_ext_alloc();

	return !!msk->cached_ext;
}

static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	sock_owned_by_me(sk);

	mptcp_for_each_subflow(msk, subflow) {
		if (subflow->data_avail)
			return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

static inline bool mptcp_skb_can_collapse_to(const struct mptcp_sock *msk,
					     const struct sk_buff *skb,
					     const struct mptcp_ext *mpext)
{
	if (!tcp_skb_can_collapse_to(skb))
		return false;

	/* can collapse only if MPTCP level sequence is in order */
	return mpext && mpext->data_seq + mpext->data_len == msk->write_seq;
}

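/* Transmit one fragment of @msg on subflow @ssk: copy the data into a page
 * fragment, push it with do_tcp_sendpages() and then either extend the DSS
 * mapping of the tail skb (collapse) or attach a fresh MPTCP extension
 * carrying a new mapping. Returns the number of bytes queued, 0 when a TCP
 * fallback was detected while waiting for memory, or a negative error.
 */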
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct msghdr *msg, long *timeo, int *pmss_now,
			      int *ps_goal)
{
	int mss_now, avail_size, size_goal, ret;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_ext *mpext = NULL;
	struct sk_buff *skb, *tail;
	bool can_collapse = false;
	struct page_frag *pfrag;
	size_t psize;

	/* use the mptcp page cache so that we can easily move the data
	 * from one substream to another, but do per subflow memory accounting
	 */
	pfrag = sk_page_frag(sk);
	while (!sk_page_frag_refill(ssk, pfrag) ||
	       !mptcp_ext_cache_refill(msk)) {
		ret = sk_stream_wait_memory(ssk, timeo);
		if (ret)
			return ret;
		if (unlikely(__mptcp_needs_tcp_fallback(msk)))
			return 0;
	}

	/* compute copy limit */
	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
	*pmss_now = mss_now;
	*ps_goal = size_goal;
	avail_size = size_goal;
	skb = tcp_write_queue_tail(ssk);
	if (skb) {
		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);

		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most one new skb.
		 * Explicitly tell TCP internals to avoid collapsing on later
		 * queue management operations, to avoid breaking the ext <->
		 * SSN association set here.
		 */
		can_collapse = (size_goal - skb->len > 0) &&
			       mptcp_skb_can_collapse_to(msk, skb, mpext);
		if (!can_collapse)
			TCP_SKB_CB(skb)->eor = 1;
		else
			avail_size = size_goal - skb->len;
	}
	psize = min_t(size_t, pfrag->size - pfrag->offset, avail_size);

	/* Copy to page */
	pr_debug("left=%zu", msg_data_left(msg));
	psize = copy_page_from_iter(pfrag->page, pfrag->offset,
				    min_t(size_t, msg_data_left(msg), psize),
				    &msg->msg_iter);
	pr_debug("left=%zu", msg_data_left(msg));
	if (!psize)
		return -EINVAL;

	/* tell the TCP stack to delay the push so that we can safely
	 * access the skb after the sendpages call
	 */
	ret = do_tcp_sendpages(ssk, pfrag->page, pfrag->offset, psize,
			       msg->msg_flags | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;
	if (unlikely(ret < psize))
		iov_iter_revert(&msg->msg_iter, psize - ret);

	/* if the tail skb extension is still the cached one, collapsing
	 * really happened. Note: we can't check for 'same skb' as the sk_buff
	 * hdr on tail can be transmitted, freed and re-allocated by the
	 * do_tcp_sendpages() call
	 */
	tail = tcp_write_queue_tail(ssk);
	if (mpext && tail && mpext == skb_ext_find(tail, SKB_EXT_MPTCP)) {
		WARN_ON_ONCE(!can_collapse);
		mpext->data_len += ret;
		goto out;
	}

	skb = tcp_write_queue_tail(ssk);
	mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
	msk->cached_ext = NULL;

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = msk->write_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = ret;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

out:
	pfrag->offset += ret;
	msk->write_seq += ret;
	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;

	return ret;
}

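/* If @ssk ran out of write space, clear MPTCP_SEND_SPACE and set
 * SOCK_NOSPACE on the grafted struct socket, so the TCP write-space
 * callback will issue a wakeup once memory is freed again.
 */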
static void ssk_check_wmem(struct mptcp_sock *msk, struct sock *ssk)
{
	struct socket *sock;

	if (likely(sk_stream_is_writeable(ssk)))
		return;

	sock = READ_ONCE(ssk->sk_socket);

	if (sock) {
		clear_bit(MPTCP_SEND_SPACE, &msk->flags);
		smp_mb__after_atomic();
		/* set NOSPACE only after clearing SEND_SPACE flag */
		set_bit(SOCK_NOSPACE, &sock->flags);
	}
}

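/* msk-level sendmsg(): pass straight through to the subflow when the
 * connection fell back to plain TCP, otherwise pick a subflow and feed it
 * one mptcp_sendmsg_frag() at a time, pushing the queued data at the end.
 */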
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	int mss_now = 0, size_goal = 0, ret = 0;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;
	size_t copied = 0;
	struct sock *ssk;
	long timeo;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock)) {
fallback:
		pr_debug("fallback passthrough");
		ret = sock_sendmsg(ssock, msg);
		return ret >= 0 ? ret + copied : (copied ? copied : ret);
	}

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	ssk = mptcp_subflow_get(msk);
	if (!ssk) {
		release_sock(sk);
		return -ENOTCONN;
	}

	pr_debug("conn_list->subflow=%p", ssk);

	lock_sock(ssk);
	while (msg_data_left(msg)) {
		ret = mptcp_sendmsg_frag(sk, ssk, msg, &timeo, &mss_now,
					 &size_goal);
		if (ret < 0)
			break;
		if (ret == 0 && unlikely(__mptcp_needs_tcp_fallback(msk))) {
			release_sock(ssk);
			ssock = __mptcp_tcp_fallback(msk);
			goto fallback;
		}

		copied += ret;
	}

	if (copied) {
		ret = copied;
		tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);
	}

	ssk_check_wmem(msk, ssk);
	release_sock(ssk);
	release_sock(sk);
	return ret;
}

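/* tcp_read_sock() actor: copy up to desc->count bytes from the subflow skb
 * into the user buffer described by arg->msg, or simply discard the payload
 * when arg->msg is NULL. Returns the number of bytes consumed.
 */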
int mptcp_read_actor(read_descriptor_t *desc, struct sk_buff *skb,
		     unsigned int offset, size_t len)
{
	struct mptcp_read_arg *arg = desc->arg.data;
	size_t copy_len;

	copy_len = min(desc->count, len);

	if (likely(arg->msg)) {
		int err;

		err = skb_copy_datagram_msg(skb, offset, arg->msg, copy_len);
		if (err) {
			pr_debug("error path");
			desc->error = err;
			return err;
		}
	} else {
		pr_debug("Flushing skb payload");
	}

	desc->count -= copy_len;

	pr_debug("consumed %zu bytes, %zu left", copy_len, desc->count);
	return copy_len;
}

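/* Sleep until the msk is flagged MPTCP_DATA_READY, the timeout expires or a
 * signal arrives; roughly the msk-level equivalent of sk_wait_data().
 */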
static void mptcp_wait_data(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct mptcp_sock *msk = mptcp_sk(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	sk_wait_event(sk, timeo,
		      test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
}

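/* msk-level recvmsg(): pass straight through to the subflow on TCP
 * fallback; otherwise drain the subflows that announce data, reading up to
 * the current MPTCP mapping boundary on each tcp_read_sock() call, and
 * block via mptcp_wait_data() when nothing is available.
 */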
static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int nonblock, int flags, int *addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	bool more_data_avail = false;
	struct mptcp_read_arg arg;
	read_descriptor_t desc;
	bool wait_data = false;
	struct socket *ssock;
	struct tcp_sock *tp;
	bool done = false;
	struct sock *ssk;
	int copied = 0;
	int target;
	long timeo;

	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
		return -EOPNOTSUPP;

	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock)) {
fallback:
		pr_debug("fallback-read subflow=%p",
			 mptcp_subflow_ctx(ssock->sk));
		copied = sock_recvmsg(ssock, msg, flags);
		return copied;
	}

	arg.msg = msg;
	desc.arg.data = &arg;
	desc.error = 0;

	timeo = sock_rcvtimeo(sk, nonblock);

	len = min_t(size_t, len, INT_MAX);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	while (!done) {
		u32 map_remaining;
		int bytes_read;

		ssk = mptcp_subflow_recv_lookup(msk);
		pr_debug("msk=%p ssk=%p", msk, ssk);
		if (!ssk)
			goto wait_for_data;

		subflow = mptcp_subflow_ctx(ssk);
		tp = tcp_sk(ssk);

		lock_sock(ssk);
		do {
			/* try to read as much data as available */
			map_remaining = subflow->map_data_len -
					mptcp_subflow_get_map_offset(subflow);
			desc.count = min_t(size_t, len - copied, map_remaining);
			pr_debug("reading %zu bytes, copied %d", desc.count,
				 copied);
			bytes_read = tcp_read_sock(ssk, &desc,
						   mptcp_read_actor);
			if (bytes_read < 0) {
				if (!copied)
					copied = bytes_read;
				done = true;
				goto next;
			}

			pr_debug("msk ack_seq=%llx -> %llx", msk->ack_seq,
				 msk->ack_seq + bytes_read);
			msk->ack_seq += bytes_read;
			copied += bytes_read;
			if (copied >= len) {
				done = true;
				goto next;
			}
			if (tp->urg_data && tp->urg_seq == tp->copied_seq) {
				pr_err("Urgent data present, cannot proceed");
				done = true;
				goto next;
			}
next:
			more_data_avail = mptcp_subflow_data_available(ssk);
		} while (more_data_avail && !done);
		release_sock(ssk);
		continue;

wait_for_data:
		more_data_avail = false;

		/* only the master socket status is relevant here. The exit
		 * conditions closely mirror tcp_recvmsg()
		 */
		if (copied >= target)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		pr_debug("block timeout %ld", timeo);
		wait_data = true;
		mptcp_wait_data(sk, &timeo);
		if (unlikely(__mptcp_tcp_fallback(msk)))
			goto fallback;
	}

	if (more_data_avail) {
		if (!test_bit(MPTCP_DATA_READY, &msk->flags))
			set_bit(MPTCP_DATA_READY, &msk->flags);
	} else if (!wait_data) {
		clear_bit(MPTCP_DATA_READY, &msk->flags);

		/* .. race-breaker: ssk might get new data after the last
		 * data_available() call returned false.
		 */
		ssk = mptcp_subflow_recv_lookup(msk);
		if (unlikely(ssk))
			set_bit(MPTCP_DATA_READY, &msk->flags);
	}

	release_sock(sk);
	return copied;
}

/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
			      struct mptcp_subflow_context *subflow,
			      long timeout)
{
	struct socket *sock = READ_ONCE(ssk->sk_socket);

	list_del(&subflow->node);

	if (sock && sock != sk->sk_socket) {
		/* outgoing subflow */
		sock_release(sock);
	} else {
		/* incoming subflow */
		tcp_close(ssk, timeout);
	}
}

static int __mptcp_init_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	INIT_LIST_HEAD(&msk->conn_list);
	__set_bit(MPTCP_SEND_SPACE, &msk->flags);

	msk->first = NULL;

	return 0;
}

static int mptcp_init_sock(struct sock *sk)
{
	if (!mptcp_is_enabled(sock_net(sk)))
		return -ENOPROTOOPT;

	return __mptcp_init_sock(sk);
}

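/* Shut down a single subflow: a listener is torn down only when the receive
 * side is shut, a SYN_SENT subflow is disconnected outright, and an
 * established one gets a regular tcp_shutdown(). Sleepers in poll are woken
 * in all cases.
 */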
static void mptcp_subflow_shutdown(struct sock *ssk, int how)
{
	lock_sock(ssk);

	switch (ssk->sk_state) {
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		/* fall through */
	case TCP_SYN_SENT:
		tcp_disconnect(ssk, O_NONBLOCK);
		break;
	default:
		ssk->sk_shutdown |= how;
		tcp_shutdown(ssk, how);
		break;
	}

	/* Wake up anyone sleeping in poll. */
	ssk->sk_state_change(ssk);
	release_sock(ssk);
}

/* Takes and releases the msk socket lock internally; the subflows are
 * closed only after the lock has been dropped.
 */
static void mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);
	LIST_HEAD(conn_list);

	lock_sock(sk);

	mptcp_token_destroy(msk->token);
	inet_sk_state_store(sk, TCP_CLOSE);

	list_splice_init(&msk->conn_list, &conn_list);

	release_sock(sk);

	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		__mptcp_close_ssk(sk, ssk, subflow, timeout);
	}

	sk_common_release(sk);
}

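/* Mirror the subflow's address/port tuple onto the msk-level socket, so
 * that getsockname()/getpeername() on the mptcp socket report the values of
 * the underlying connection.
 */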
static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
	struct ipv6_pinfo *msk6 = inet6_sk(msk);

	msk->sk_v6_daddr = ssk->sk_v6_daddr;
	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

	if (msk6 && ssk6) {
		msk6->saddr = ssk6->saddr;
		msk6->flow_label = ssk6->flow_label;
	}
#endif

	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
{
	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);

	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
#endif

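/* Clone the msk for a passively opened connection. For IPv6 the pinet6
 * pointer must be re-homed to the ipv6_pinfo area embedded at the tail of
 * struct mptcp6_sock, as sk_clone_lock() copied the parent's pointer.
 */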
struct sock *mptcp_sk_clone_lock(const struct sock *sk)
{
	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);

	if (!nsk)
		return NULL;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (nsk->sk_family == AF_INET6)
		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif

	return nsk;
}

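/* Accept a connection on the initial subflow's listener. When the
 * handshake negotiated MP_CAPABLE, wrap the new TCP socket into a freshly
 * cloned msk and seed the MPTCP-level sequence state from the subflow
 * context; otherwise the plain TCP socket is returned as-is.
 */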
static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
				 bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *listener;
	struct sock *newsk;

	listener = __mptcp_nmpc_socket(msk);
	if (WARN_ON_ONCE(!listener)) {
		*err = -EINVAL;
		return NULL;
	}

	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
	newsk = inet_csk_accept(listener->sk, flags, err, kern);
	if (!newsk)
		return NULL;

	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));

	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;
		struct sock *ssk = newsk;
		u64 ack_seq;

		subflow = mptcp_subflow_ctx(newsk);
		lock_sock(sk);

		local_bh_disable();
		new_mptcp_sock = mptcp_sk_clone_lock(sk);
		if (!new_mptcp_sock) {
			*err = -ENOBUFS;
			local_bh_enable();
			release_sock(sk);
			mptcp_subflow_shutdown(newsk, SHUT_RDWR + 1);
			tcp_close(newsk, 0);
			return NULL;
		}

		__mptcp_init_sock(new_mptcp_sock);

		msk = mptcp_sk(new_mptcp_sock);
		msk->local_key = subflow->local_key;
		msk->token = subflow->token;
		msk->subflow = NULL;
		msk->first = newsk;

		mptcp_token_update_accept(newsk, new_mptcp_sock);

		msk->write_seq = subflow->idsn + 1;
		if (subflow->can_ack) {
			msk->can_ack = true;
			msk->remote_key = subflow->remote_key;
			mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
			ack_seq++;
			msk->ack_seq = ack_seq;
		}
		newsk = new_mptcp_sock;
		mptcp_copy_inaddrs(newsk, ssk);
		list_add(&subflow->node, &msk->conn_list);

		/* will be fully established at mptcp_stream_accept()
		 * completion.
		 */
		inet_sk_state_store(new_mptcp_sock, TCP_SYN_RECV);
		bh_unlock_sock(new_mptcp_sock);
		local_bh_enable();
		release_sock(sk);

		/* the subflow can already receive packets, avoid racing with
		 * the receive path and process the pending ones
		 */
		lock_sock(ssk);
		subflow->rel_write_seq = 1;
		subflow->tcp_sock = ssk;
		subflow->conn = new_mptcp_sock;
		if (unlikely(!skb_queue_empty(&ssk->sk_receive_queue)))
			mptcp_subflow_data_available(ssk);
		release_sock(ssk);
	}

	return newsk;
}

static void mptcp_destroy(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (msk->cached_ext)
		__skb_ext_put(msk->cached_ext);
}

static int mptcp_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int ret = -EOPNOTSUPP;
	struct socket *ssock;
	struct sock *ssk;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of setsockopt() when the socket is connected and
	 * there are multiple subflows is not defined.
	 */
	lock_sock(sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (IS_ERR(ssock)) {
		release_sock(sk);
		return ret;
	}

	ssk = ssock->sk;
	sock_hold(ssk);
	release_sock(sk);

	ret = tcp_setsockopt(ssk, level, optname, optval, optlen);
	sock_put(ssk);

	return ret;
}

static int mptcp_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *option)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int ret = -EOPNOTSUPP;
	struct socket *ssock;
	struct sock *ssk;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of getsockopt() when the socket is connected and
	 * there are multiple subflows is not defined.
	 */
	lock_sock(sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (IS_ERR(ssock)) {
		release_sock(sk);
		return ret;
	}

	ssk = ssock->sk;
	sock_hold(ssk);
	release_sock(sk);

	ret = tcp_getsockopt(ssk, level, optname, optval, option);
	sock_put(ssk);

	return ret;
}

static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	ssock = __mptcp_nmpc_socket(msk);
	pr_debug("msk=%p, subflow=%p", msk, ssock);
	if (WARN_ON_ONCE(!ssock))
		return -EINVAL;

	return inet_csk_get_port(ssock->sk, snum);
}

void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;
	u64 ack_seq;

	subflow = mptcp_subflow_ctx(ssk);

	if (!subflow->mp_capable)
		return;

	sk = subflow->conn;
	msk = mptcp_sk(sk);

	pr_debug("msk=%p, token=%u", sk, subflow->token);

	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
	ack_seq++;
	subflow->map_seq = ack_seq;
	subflow->map_subflow_seq = 1;
	subflow->rel_write_seq = 1;

	/* the socket is not connected yet, no msk/subflow ops can access or
	 * race with the fields below
	 */
	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->token, subflow->token);
	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
	WRITE_ONCE(msk->ack_seq, ack_seq);
	WRITE_ONCE(msk->can_ack, 1);
}

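/* Graft subflow @sk onto the mptcp-level struct socket @parent, so that
 * wakeups and socket ownership (uid) follow the mptcp socket that the
 * application actually holds.
 */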
static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}

static bool mptcp_memory_free(const struct sock *sk, int wake)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true;
}

static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.close		= mptcp_close,
	.accept		= mptcp_accept,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= tcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.recvmsg	= mptcp_recvmsg,
	.hash		= inet_hash,
	.unhash		= inet_unhash,
	.get_port	= mptcp_get_port,
	.stream_memory_free	= mptcp_memory_free,
	.obj_size	= sizeof(struct mptcp_sock),
	.no_autobind	= true,
};

static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

	err = ssock->ops->bind(ssock, uaddr, addr_len);
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
				int addr_len, int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, TCP_SYN_SENT);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
		mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0;
#endif

	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_v4_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcp_prot) {
		/* we are being invoked from __sys_accept4, after
		 * mptcp_accept() has just accepted a non-mp-capable
		 * flow: sk is a tcp_sk, not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
	}

	return inet_getname(sock, uaddr, peer);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int mptcp_v6_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcpv6_prot) {
		/* we are being invoked from __sys_accept4 after
		 * mptcp_accept() has accepted a non-mp-capable
		 * subflow: sk is a tcp_sk, not mptcp.
		 *
		 * Hand the socket over to tcp so all further
		 * socket ops bypass mptcp.
		 */
		sock->ops = &inet6_stream_ops;
	}

	return inet6_getname(sock, uaddr, peer);
}
#endif

static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, TCP_LISTEN);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

	err = ssock->ops->listen(ssock, backlog);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static bool is_tcp_proto(const struct proto *p)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	return p == &tcp_prot || p == &tcpv6_prot;
#else
	return p == &tcp_prot;
#endif
}

static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	if (sock->sk->sk_state != TCP_LISTEN)
		goto unlock_fail;

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock)
		goto unlock_fail;

	sock_hold(ssock->sk);
	release_sock(sock->sk);

	err = ssock->ops->accept(sock, newsock, flags, kern);
	if (err == 0 && !is_tcp_proto(newsock->sk->sk_prot)) {
		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
		struct mptcp_subflow_context *subflow;

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		list_for_each_entry(subflow, &msk->conn_list, node) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}

		inet_sk_state_store(newsock->sk, TCP_ESTABLISHED);
	}

	sock_put(ssock->sk);
	return err;

unlock_fail:
	release_sock(sock->sk);
	return -EINVAL;
}

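/* poll() for mptcp sockets: before the MP_CAPABLE handshake completes the
 * request is delegated to the initial subflow (or to the fallback TCP
 * socket); afterwards readiness is derived from the msk-level flags and
 * write space.
 */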
static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	struct sock *sk = sock->sk;
	struct mptcp_sock *msk;
	struct socket *ssock;
	__poll_t mask = 0;

	msk = mptcp_sk(sk);
	lock_sock(sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (ssock) {
		mask = ssock->ops->poll(file, ssock, wait);
		release_sock(sk);
		return mask;
	}

	release_sock(sk);
	sock_poll_wait(file, sock, wait);
	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock))
		return ssock->ops->poll(file, ssock, NULL);

	if (test_bit(MPTCP_DATA_READY, &msk->flags))
		mask = EPOLLIN | EPOLLRDNORM;
	if (sk_stream_is_writeable(sk) &&
	    test_bit(MPTCP_SEND_SPACE, &msk->flags))
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	release_sock(sk);

	return mask;
}

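/* Shut down the mptcp-level socket: update the socket/connection state and
 * propagate the shutdown to every subflow. The 'how++' below turns the
 * SHUT_* constants into the RCV_SHUTDOWN/SEND_SHUTDOWN bitmask, following
 * the same convention as inet_shutdown().
 */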
static int mptcp_shutdown(struct socket *sock, int how)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	int ret = 0;

	pr_debug("sk=%p, how=%d", msk, how);

	lock_sock(sock->sk);

	if (how == SHUT_WR || how == SHUT_RDWR)
		inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (sock->state == SS_CONNECTING) {
		if ((1 << sock->sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

		mptcp_subflow_shutdown(tcp_sk, how);
	}

out_unlock:
	release_sock(sock->sk);

	return ret;
}

static const struct proto_ops mptcp_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = mptcp_v4_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

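/* Once mptcp_proto_init() below has registered the protosw, applications
 * can request an MPTCP socket directly. A minimal userspace sketch,
 * assuming the IPPROTO_MPTCP constant (262) is visible in the userspace
 * headers; the errno-based fallback is illustrative, not mandated here:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *	if (fd < 0)
 *		// kernel without MPTCP, or MPTCP disabled: use plain TCP
 *		fd = socket(AF_INET, SOCK_STREAM, 0);
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 */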
void mptcp_proto_init(void)
{
	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	mptcp_subflow_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = mptcp_v6_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet6_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet6_sendmsg,
	.recvmsg	   = inet6_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct proto mptcp_v6_prot;

static void mptcp_v6_destroy(struct sock *sk)
{
	mptcp_destroy(sk);
	inet6_destroy_sock(sk);
}

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

int mptcp_proto_v6_init(void)
{
	int err;

	mptcp_v6_prot = mptcp_prot;
	strcpy(mptcp_v6_prot.name, "MPTCPv6");
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.destroy = mptcp_v6_destroy;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif