Mat Martineauf870fa02020-01-21 16:56:15 -08001// SPDX-License-Identifier: GPL-2.0
2/* Multipath TCP
3 *
4 * Copyright (c) 2017 - 2019, Intel Corporation.
5 */
6
7#define pr_fmt(fmt) "MPTCP: " fmt
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/netdevice.h>
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -080012#include <linux/sched/signal.h>
13#include <linux/atomic.h>
Mat Martineauf870fa02020-01-21 16:56:15 -080014#include <net/sock.h>
15#include <net/inet_common.h>
16#include <net/inet_hashtables.h>
17#include <net/protocol.h>
18#include <net/tcp.h>
Mat Martineau3721b9b2020-07-28 15:12:03 -070019#include <net/tcp_states.h>
Peter Krystadcf7da0d2020-01-21 16:56:19 -080020#if IS_ENABLED(CONFIG_MPTCP_IPV6)
21#include <net/transp_v6.h>
22#endif
Mat Martineauf870fa02020-01-21 16:56:15 -080023#include <net/mptcp.h>
24#include "protocol.h"
Florian Westphalfc518952020-03-27 14:48:50 -070025#include "mib.h"
Mat Martineauf870fa02020-01-21 16:56:15 -080026
Peter Krystad2303f992020-01-21 16:56:17 -080027#define MPTCP_SAME_STATE TCP_MAX_STATES
28
Florian Westphalb0519de2020-02-06 00:39:37 +010029#if IS_ENABLED(CONFIG_MPTCP_IPV6)
30struct mptcp6_sock {
31 struct mptcp_sock msk;
32 struct ipv6_pinfo np;
33};
34#endif
35
Florian Westphal6771bfd2020-02-26 10:14:48 +010036struct mptcp_skb_cb {
37 u32 offset;
38};
39
40#define MPTCP_SKB_CB(__skb) ((struct mptcp_skb_cb *)&((__skb)->cb[0]))
41
Paolo Abenid0272362020-03-27 14:48:45 -070042static struct percpu_counter mptcp_sockets_allocated;
43
Peter Krystad2303f992020-01-21 16:56:17 -080044/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
45 * completed yet or has failed, return the subflow socket.
46 * Otherwise return NULL.
47 */
48static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
49{
Christoph Paaschd22f4982020-01-21 16:56:32 -080050 if (!msk->subflow || READ_ONCE(msk->can_ack))
Peter Krystad2303f992020-01-21 16:56:17 -080051 return NULL;
52
53 return msk->subflow;
54}
55
Paolo Abenid2f77c52020-06-29 22:26:22 +020056static bool mptcp_is_tcpsk(struct sock *sk)
Florian Westphal0b4f33d2020-04-02 13:44:51 +020057{
58 struct socket *sock = sk->sk_socket;
59
Florian Westphal0b4f33d2020-04-02 13:44:51 +020060 if (unlikely(sk->sk_prot == &tcp_prot)) {
61 /* we are being invoked after mptcp_accept() has
62 * accepted a non-mp-capable flow: sk is a tcp_sk,
63 * not an mptcp one.
64 *
65 * Hand the socket over to tcp so all further socket ops
66 * bypass mptcp.
67 */
68 sock->ops = &inet_stream_ops;
Paolo Abenid2f77c52020-06-29 22:26:22 +020069 return true;
Florian Westphal0b4f33d2020-04-02 13:44:51 +020070#if IS_ENABLED(CONFIG_MPTCP_IPV6)
71 } else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
72 sock->ops = &inet6_stream_ops;
Paolo Abenid2f77c52020-06-29 22:26:22 +020073 return true;
Florian Westphal0b4f33d2020-04-02 13:44:51 +020074#endif
75 }
76
Paolo Abenid2f77c52020-06-29 22:26:22 +020077 return false;
Florian Westphal0b4f33d2020-04-02 13:44:51 +020078}
79
Paolo Abeni76660af2020-06-29 22:26:24 +020080static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk)
Peter Krystadcec37a62020-01-21 16:56:18 -080081{
Peter Krystadcec37a62020-01-21 16:56:18 -080082 sock_owned_by_me((const struct sock *)msk);
83
Davide Carattie1ff9e82020-06-29 22:26:20 +020084 if (likely(!__mptcp_check_fallback(msk)))
Peter Krystadcec37a62020-01-21 16:56:18 -080085 return NULL;
86
Paolo Abeni76660af2020-06-29 22:26:24 +020087 return msk->first;
Peter Krystadcec37a62020-01-21 16:56:18 -080088}
89
Paolo Abenifa680182020-06-29 22:26:23 +020090static int __mptcp_socket_create(struct mptcp_sock *msk)
Peter Krystad2303f992020-01-21 16:56:17 -080091{
92 struct mptcp_subflow_context *subflow;
93 struct sock *sk = (struct sock *)msk;
94 struct socket *ssock;
95 int err;
96
Peter Krystad2303f992020-01-21 16:56:17 -080097 err = mptcp_subflow_create_socket(sk, &ssock);
98 if (err)
Paolo Abenifa680182020-06-29 22:26:23 +020099 return err;
Peter Krystad2303f992020-01-21 16:56:17 -0800100
Paolo Abeni8ab183d2020-01-21 16:56:33 -0800101 msk->first = ssock->sk;
Peter Krystad2303f992020-01-21 16:56:17 -0800102 msk->subflow = ssock;
103 subflow = mptcp_subflow_ctx(ssock->sk);
Peter Krystadcec37a62020-01-21 16:56:18 -0800104 list_add(&subflow->node, &msk->conn_list);
Peter Krystad2303f992020-01-21 16:56:17 -0800105 subflow->request_mptcp = 1;
106
Davide Carattie1ff9e82020-06-29 22:26:20 +0200107	/* accept() will wait on first subflow sk_wq, and we always wake up
108 * via msk->sk_socket
109 */
110 RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);
111
Paolo Abenifa680182020-06-29 22:26:23 +0200112 return 0;
Peter Krystad2303f992020-01-21 16:56:17 -0800113}
114
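/* Move one skb from the subflow receive queue to the msk-level receive
 * queue: advance msk->ack_seq by the copied length, try to coalesce
 * with the current queue tail, and otherwise charge the skb to the msk
 * and record the MPTCP-level offset in the skb control block.
 */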
Florian Westphal6771bfd2020-02-26 10:14:48 +0100115static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
116 struct sk_buff *skb,
117 unsigned int offset, size_t copy_len)
118{
119 struct sock *sk = (struct sock *)msk;
Florian Westphal4e637c72020-05-25 23:41:13 +0200120 struct sk_buff *tail;
Florian Westphal6771bfd2020-02-26 10:14:48 +0100121
122 __skb_unlink(skb, &ssk->sk_receive_queue);
Florian Westphal4e637c72020-05-25 23:41:13 +0200123
124 skb_ext_reset(skb);
125 skb_orphan(skb);
126 msk->ack_seq += copy_len;
127
128 tail = skb_peek_tail(&sk->sk_receive_queue);
129 if (offset == 0 && tail) {
130 bool fragstolen;
131 int delta;
132
133 if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
134 kfree_skb_partial(skb, fragstolen);
135 atomic_add(delta, &sk->sk_rmem_alloc);
136 sk_mem_charge(sk, delta);
137 return;
138 }
139 }
140
Florian Westphal600911f2020-02-26 10:14:49 +0100141 skb_set_owner_r(skb, sk);
Florian Westphal6771bfd2020-02-26 10:14:48 +0100142 __skb_queue_tail(&sk->sk_receive_queue, skb);
Florian Westphal6771bfd2020-02-26 10:14:48 +0100143 MPTCP_SKB_CB(skb)->offset = offset;
144}
145
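/* Stop the MPTCP-level retransmit timer (which reuses the icsk
 * retransmit timer) and clear the cached timeout interval.
 */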
Mat Martineau16a9a9d2020-07-28 15:12:05 -0700146static void mptcp_stop_timer(struct sock *sk)
147{
148 struct inet_connection_sock *icsk = inet_csk(sk);
149
150 sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
151 mptcp_sk(sk)->timer_ival = 0;
152}
153
Florian Westphalde06f572020-04-02 13:44:53 +0200154/* both sockets must be locked */
155static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk,
156 struct sock *ssk)
157{
158 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
159 u64 dsn = mptcp_subflow_get_mapped_dsn(subflow);
160
161 /* revalidate data sequence number.
162 *
163 * mptcp_subflow_data_available() is usually called
164	 * without msk lock. It's unlikely (but possible)
165 * that msk->ack_seq has been advanced since the last
166 * call found in-sequence data.
167 */
168 if (likely(dsn == msk->ack_seq))
169 return true;
170
171 subflow->data_avail = 0;
172 return mptcp_subflow_data_available(ssk);
173}
174
Mat Martineau16a9a9d2020-07-28 15:12:05 -0700175static void mptcp_check_data_fin_ack(struct sock *sk)
176{
177 struct mptcp_sock *msk = mptcp_sk(sk);
178
179 if (__mptcp_check_fallback(msk))
180 return;
181
182 /* Look for an acknowledged DATA_FIN */
183 if (((1 << sk->sk_state) &
184 (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
185 msk->write_seq == atomic64_read(&msk->snd_una)) {
186 mptcp_stop_timer(sk);
187
188 WRITE_ONCE(msk->snd_data_fin_enable, 0);
189
190 switch (sk->sk_state) {
191 case TCP_FIN_WAIT1:
192 inet_sk_state_store(sk, TCP_FIN_WAIT2);
193 sk->sk_state_change(sk);
194 break;
195 case TCP_CLOSING:
196 fallthrough;
197 case TCP_LAST_ACK:
198 inet_sk_state_store(sk, TCP_CLOSE);
199 sk->sk_state_change(sk);
200 break;
201 }
202
203 if (sk->sk_shutdown == SHUTDOWN_MASK ||
204 sk->sk_state == TCP_CLOSE)
205 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
206 else
207 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
208 }
209}
210
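/* Return true when a received DATA_FIN is pending and the msk has
 * consumed all data up to its sequence number; optionally report that
 * sequence number via @seq.
 */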
Mat Martineau3721b9b2020-07-28 15:12:03 -0700211static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
212{
213 struct mptcp_sock *msk = mptcp_sk(sk);
214
215 if (READ_ONCE(msk->rcv_data_fin) &&
216 ((1 << sk->sk_state) &
217 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
218 u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);
219
220 if (msk->ack_seq == rcv_data_fin_seq) {
221 if (seq)
222 *seq = rcv_data_fin_seq;
223
224 return true;
225 }
226 }
227
228 return false;
229}
230
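/* Refresh the MPTCP-level retransmit timeout: prefer the time left on
 * the given subflow's pending timer, then the previously cached value,
 * and finally fall back to TCP_RTO_MIN.
 */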
231static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
232{
233 long tout = ssk && inet_csk(ssk)->icsk_pending ?
234 inet_csk(ssk)->icsk_timeout - jiffies : 0;
235
236 if (tout <= 0)
237 tout = mptcp_sk(sk)->timer_ival;
238 mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
239}
240
241static void mptcp_check_data_fin(struct sock *sk)
242{
243 struct mptcp_sock *msk = mptcp_sk(sk);
244 u64 rcv_data_fin_seq;
245
246 if (__mptcp_check_fallback(msk) || !msk->first)
247 return;
248
249 /* Need to ack a DATA_FIN received from a peer while this side
250 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
251 * msk->rcv_data_fin was set when parsing the incoming options
252 * at the subflow level and the msk lock was not held, so this
253 * is the first opportunity to act on the DATA_FIN and change
254 * the msk state.
255 *
256 * If we are caught up to the sequence number of the incoming
257	 * DATA_FIN, send the DATA_ACK now and do the state transition. If
258 * not caught up, do nothing and let the recv code send DATA_ACK
259 * when catching up.
260 */
261
262 if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
263 struct mptcp_subflow_context *subflow;
264
265 msk->ack_seq++;
266 WRITE_ONCE(msk->rcv_data_fin, 0);
267
268 sk->sk_shutdown |= RCV_SHUTDOWN;
Mat Martineau16a9a9d2020-07-28 15:12:05 -0700269 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
270 set_bit(MPTCP_DATA_READY, &msk->flags);
Mat Martineau3721b9b2020-07-28 15:12:03 -0700271
272 switch (sk->sk_state) {
273 case TCP_ESTABLISHED:
274 inet_sk_state_store(sk, TCP_CLOSE_WAIT);
275 break;
276 case TCP_FIN_WAIT1:
277 inet_sk_state_store(sk, TCP_CLOSING);
278 break;
279 case TCP_FIN_WAIT2:
280 inet_sk_state_store(sk, TCP_CLOSE);
281 // @@ Close subflows now?
282 break;
283 default:
284 /* Other states not expected */
285 WARN_ON_ONCE(1);
286 break;
287 }
288
289 mptcp_set_timeout(sk, NULL);
290 mptcp_for_each_subflow(msk, subflow) {
291 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
292
293 lock_sock(ssk);
294 tcp_send_ack(ssk);
295 release_sock(ssk);
296 }
297
298 sk->sk_state_change(sk);
299
300 if (sk->sk_shutdown == SHUTDOWN_MASK ||
301 sk->sk_state == TCP_CLOSE)
302 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
303 else
304 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
305 }
306}
307
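/* Drain in-sequence data from the subflow receive queue into the msk
 * receive queue, advancing the subflow's copied_seq. The amount of
 * data moved is reported via @bytes; if the moves caught up with a
 * pending DATA_FIN, the worker is scheduled to act on it.
 */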
Florian Westphal6771bfd2020-02-26 10:14:48 +0100308static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
309 struct sock *ssk,
310 unsigned int *bytes)
311{
312 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
Florian Westphal600911f2020-02-26 10:14:49 +0100313 struct sock *sk = (struct sock *)msk;
Florian Westphal6771bfd2020-02-26 10:14:48 +0100314 unsigned int moved = 0;
315 bool more_data_avail;
316 struct tcp_sock *tp;
317 bool done = false;
Florian Westphal600911f2020-02-26 10:14:49 +0100318
Florian Westphalde06f572020-04-02 13:44:53 +0200319 if (!mptcp_subflow_dsn_valid(msk, ssk)) {
320 *bytes = 0;
321 return false;
322 }
323
Florian Westphal6771bfd2020-02-26 10:14:48 +0100324 tp = tcp_sk(ssk);
325 do {
326 u32 map_remaining, offset;
327 u32 seq = tp->copied_seq;
328 struct sk_buff *skb;
329 bool fin;
330
331 /* try to move as much data as available */
332 map_remaining = subflow->map_data_len -
333 mptcp_subflow_get_map_offset(subflow);
334
335 skb = skb_peek(&ssk->sk_receive_queue);
336 if (!skb)
337 break;
338
Davide Carattie1ff9e82020-06-29 22:26:20 +0200339 if (__mptcp_check_fallback(msk)) {
340 /* if we are running under the workqueue, TCP could have
341 * collapsed skbs between dummy map creation and now
342 * be sure to adjust the size
343 */
344 map_remaining = skb->len;
345 subflow->map_data_len = skb->len;
346 }
347
Florian Westphal6771bfd2020-02-26 10:14:48 +0100348 offset = seq - TCP_SKB_CB(skb)->seq;
349 fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
350 if (fin) {
351 done = true;
352 seq++;
353 }
354
355 if (offset < skb->len) {
356 size_t len = skb->len - offset;
357
358 if (tp->urg_data)
359 done = true;
360
361 __mptcp_move_skb(msk, ssk, skb, offset, len);
362 seq += len;
363 moved += len;
364
365 if (WARN_ON_ONCE(map_remaining < len))
366 break;
367 } else {
368 WARN_ON_ONCE(!fin);
369 sk_eat_skb(ssk, skb);
370 done = true;
371 }
372
373 WRITE_ONCE(tp->copied_seq, seq);
374 more_data_avail = mptcp_subflow_data_available(ssk);
Florian Westphal600911f2020-02-26 10:14:49 +0100375
376 if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf)) {
377 done = true;
378 break;
379 }
Florian Westphal6771bfd2020-02-26 10:14:48 +0100380 } while (more_data_avail);
381
382 *bytes = moved;
383
Mat Martineau43b54c62020-07-28 15:12:06 -0700384 /* If the moves have caught up with the DATA_FIN sequence number
385 * it's time to ack the DATA_FIN and change socket state, but
386 * this is not a good place to change state. Let the workqueue
387 * do it.
388 */
389 if (mptcp_pending_data_fin(sk, NULL) &&
390 schedule_work(&msk->work))
391 sock_hold(sk);
392
Florian Westphal6771bfd2020-02-26 10:14:48 +0100393 return done;
394}
395
Florian Westphal2e522132020-02-26 10:14:51 +0100396/* In most cases we will be able to lock the mptcp socket. If it's already
397 * owned, we need to defer to the work queue to avoid ABBA deadlock.
398 */
399static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
400{
401 struct sock *sk = (struct sock *)msk;
402 unsigned int moved = 0;
403
404 if (READ_ONCE(sk->sk_lock.owned))
405 return false;
406
407 if (unlikely(!spin_trylock_bh(&sk->sk_lock.slock)))
408 return false;
409
410 /* must re-check after taking the lock */
411 if (!READ_ONCE(sk->sk_lock.owned))
412 __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
413
414 spin_unlock_bh(&sk->sk_lock.slock);
415
416 return moved > 0;
417}
418
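/* Called when a subflow has new data: mark the msk as ready,
 * opportunistically pull skbs into the msk receive queue if it has
 * room and its lock is not owned, otherwise defer the move to the
 * release callback, then wake up any reader.
 */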
419void mptcp_data_ready(struct sock *sk, struct sock *ssk)
Florian Westphal101f6f82020-02-26 10:14:46 +0100420{
421 struct mptcp_sock *msk = mptcp_sk(sk);
422
423 set_bit(MPTCP_DATA_READY, &msk->flags);
Florian Westphal6771bfd2020-02-26 10:14:48 +0100424
Florian Westphal2e522132020-02-26 10:14:51 +0100425 if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
426 move_skbs_to_msk(msk, ssk))
427 goto wake;
428
Florian Westphal600911f2020-02-26 10:14:49 +0100429 /* don't schedule if mptcp sk is (still) over limit */
430 if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf))
431 goto wake;
432
Paolo Abeni14c441b2020-02-26 10:14:52 +0100433 /* mptcp socket is owned, release_cb should retry */
434 if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
435 &sk->sk_tsq_flags)) {
436 sock_hold(sk);
Florian Westphal6771bfd2020-02-26 10:14:48 +0100437
Paolo Abeni14c441b2020-02-26 10:14:52 +0100438		/* need to try again, it's possible release_cb() has already
439 * been called after the test_and_set_bit() above.
440 */
441 move_skbs_to_msk(msk, ssk);
442 }
Florian Westphal600911f2020-02-26 10:14:49 +0100443wake:
Florian Westphal101f6f82020-02-26 10:14:46 +0100444 sk->sk_data_ready(sk);
445}
446
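/* Splice subflows added via MP_JOIN from the join list onto the
 * connection list, under the join list lock.
 */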
Peter Krystadec3edaa2020-03-27 14:48:40 -0700447static void __mptcp_flush_join_list(struct mptcp_sock *msk)
448{
449 if (likely(list_empty(&msk->join_list)))
450 return;
451
452 spin_lock_bh(&msk->join_list_lock);
453 list_splice_tail_init(&msk->join_list, &msk->conn_list);
454 spin_unlock_bh(&msk->join_list_lock);
455}
456
Paolo Abenib51f9b82020-03-27 14:48:44 -0700457static bool mptcp_timer_pending(struct sock *sk)
458{
459 return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
460}
461
462static void mptcp_reset_timer(struct sock *sk)
463{
464 struct inet_connection_sock *icsk = inet_csk(sk);
465 unsigned long tout;
466
467 /* should never be called with mptcp level timer cleared */
468 tout = READ_ONCE(mptcp_sk(sk)->timer_ival);
469 if (WARN_ON_ONCE(!tout))
470 tout = TCP_RTO_MIN;
471 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
472}
473
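/* MPTCP-level data has been acked: re-arm the retransmit timer and,
 * if the stream is not writeable or no longer established, kick the
 * worker to clean the rtx queue and update the msk state.
 */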
474void mptcp_data_acked(struct sock *sk)
475{
476 mptcp_reset_timer(sk);
Paolo Abeni3b1d6212020-03-27 14:48:48 -0700477
Mat Martineau43b54c62020-07-28 15:12:06 -0700478 if ((!sk_stream_is_writeable(sk) ||
479 (inet_sk_state_load(sk) != TCP_ESTABLISHED)) &&
Paolo Abeni3b1d6212020-03-27 14:48:48 -0700480 schedule_work(&mptcp_sk(sk)->work))
481 sock_hold(sk);
Paolo Abenib51f9b82020-03-27 14:48:44 -0700482}
483
Florian Westphal59832e22020-04-02 13:44:52 +0200484void mptcp_subflow_eof(struct sock *sk)
485{
486 struct mptcp_sock *msk = mptcp_sk(sk);
487
488 if (!test_and_set_bit(MPTCP_WORK_EOF, &msk->flags) &&
489 schedule_work(&msk->work))
490 sock_hold(sk);
491}
492
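/* Once every subflow has reported rx EOF, propagate RCV_SHUTDOWN to
 * the msk and wake up pending readers.
 */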
Paolo Abeni59698562020-06-10 10:47:41 +0200493static void mptcp_check_for_eof(struct mptcp_sock *msk)
494{
495 struct mptcp_subflow_context *subflow;
496 struct sock *sk = (struct sock *)msk;
497 int receivers = 0;
498
499 mptcp_for_each_subflow(msk, subflow)
500 receivers += !subflow->rx_eof;
501
502 if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
503 /* hopefully temporary hack: propagate shutdown status
504 * to msk, when all subflows agree on it
505 */
506 sk->sk_shutdown |= RCV_SHUTDOWN;
507
508 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
509 set_bit(MPTCP_DATA_READY, &msk->flags);
510 sk->sk_data_ready(sk);
511 }
512}
513
Mat Martineau6d0060f2020-01-21 16:56:23 -0800514static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
515{
Florian Westphal4930f482020-05-16 10:46:23 +0200516 const struct sock *sk = (const struct sock *)msk;
517
Mat Martineau6d0060f2020-01-21 16:56:23 -0800518 if (!msk->cached_ext)
Florian Westphal4930f482020-05-16 10:46:23 +0200519 msk->cached_ext = __skb_ext_alloc(sk->sk_allocation);
Mat Martineau6d0060f2020-01-21 16:56:23 -0800520
521 return !!msk->cached_ext;
522}
523
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -0800524static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
525{
526 struct mptcp_subflow_context *subflow;
527 struct sock *sk = (struct sock *)msk;
528
529 sock_owned_by_me(sk);
530
531 mptcp_for_each_subflow(msk, subflow) {
532 if (subflow->data_avail)
533 return mptcp_subflow_tcp_sock(subflow);
534 }
535
536 return NULL;
537}
538
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700539static bool mptcp_skb_can_collapse_to(u64 write_seq,
540 const struct sk_buff *skb,
541 const struct mptcp_ext *mpext)
Mat Martineau6d0060f2020-01-21 16:56:23 -0800542{
Paolo Abeni57040752020-01-21 16:56:27 -0800543 if (!tcp_skb_can_collapse_to(skb))
544 return false;
545
546 /* can collapse only if MPTCP level sequence is in order */
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700547 return mpext && mpext->data_seq + mpext->data_len == write_seq;
Paolo Abeni57040752020-01-21 16:56:27 -0800548}
549
Paolo Abeni18b683b2020-03-27 14:48:43 -0700550static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
551 const struct page_frag *pfrag,
552 const struct mptcp_data_frag *df)
553{
554 return df && pfrag->page == df->page &&
555 df->data_seq + df->data_len == msk->write_seq;
556}
557
Paolo Abenid0272362020-03-27 14:48:45 -0700558static void dfrag_uncharge(struct sock *sk, int len)
Paolo Abeni18b683b2020-03-27 14:48:43 -0700559{
Paolo Abenid0272362020-03-27 14:48:45 -0700560 sk_mem_uncharge(sk, len);
Florian Westphal7948f6c2020-03-27 14:48:46 -0700561 sk_wmem_queued_add(sk, -len);
Paolo Abenid0272362020-03-27 14:48:45 -0700562}
563
564static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
565{
566 int len = dfrag->data_len + dfrag->overhead;
567
Paolo Abeni18b683b2020-03-27 14:48:43 -0700568 list_del(&dfrag->list);
Paolo Abenid0272362020-03-27 14:48:45 -0700569 dfrag_uncharge(sk, len);
Paolo Abeni18b683b2020-03-27 14:48:43 -0700570 put_page(dfrag->page);
571}
572
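/* Drop rtx queue fragments that are fully acked at the MPTCP level
 * (below snd_una), trim a partially acked head fragment, then reclaim
 * the freed memory and possibly wake up writers.
 */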
573static void mptcp_clean_una(struct sock *sk)
574{
575 struct mptcp_sock *msk = mptcp_sk(sk);
576 struct mptcp_data_frag *dtmp, *dfrag;
Paolo Abenid0272362020-03-27 14:48:45 -0700577 bool cleaned = false;
Davide Carattie1ff9e82020-06-29 22:26:20 +0200578 u64 snd_una;
579
580 /* on fallback we just need to ignore snd_una, as this is really
581 * plain TCP
582 */
583 if (__mptcp_check_fallback(msk))
584 atomic64_set(&msk->snd_una, msk->write_seq);
585 snd_una = atomic64_read(&msk->snd_una);
Paolo Abeni18b683b2020-03-27 14:48:43 -0700586
587 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
588 if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
589 break;
590
Paolo Abenid0272362020-03-27 14:48:45 -0700591 dfrag_clear(sk, dfrag);
592 cleaned = true;
593 }
594
Florian Westphal7948f6c2020-03-27 14:48:46 -0700595 dfrag = mptcp_rtx_head(sk);
596 if (dfrag && after64(snd_una, dfrag->data_seq)) {
Paolo Abeni53eb4c32020-07-23 13:02:30 +0200597 u64 delta = snd_una - dfrag->data_seq;
598
599 if (WARN_ON_ONCE(delta > dfrag->data_len))
600 goto out;
Florian Westphal7948f6c2020-03-27 14:48:46 -0700601
602 dfrag->data_seq += delta;
Paolo Abeni53eb4c32020-07-23 13:02:30 +0200603 dfrag->offset += delta;
Florian Westphal7948f6c2020-03-27 14:48:46 -0700604 dfrag->data_len -= delta;
605
606 dfrag_uncharge(sk, delta);
607 cleaned = true;
608 }
609
Paolo Abeni53eb4c32020-07-23 13:02:30 +0200610out:
Paolo Abenid0272362020-03-27 14:48:45 -0700611 if (cleaned) {
612 sk_mem_reclaim_partial(sk);
Florian Westphal7948f6c2020-03-27 14:48:46 -0700613
614 /* Only wake up writers if a subflow is ready */
615 if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
616 sk_stream_write_space(sk);
Paolo Abeni18b683b2020-03-27 14:48:43 -0700617 }
618}
619
620/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
621 * data
622 */
623static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
624{
625 if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
626 pfrag, sk->sk_allocation)))
627 return true;
628
629 sk->sk_prot->enter_memory_pressure(sk);
630 sk_stream_moderate_sndbuf(sk);
631 return false;
632}
633
634static struct mptcp_data_frag *
635mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
636 int orig_offset)
637{
638 int offset = ALIGN(orig_offset, sizeof(long));
639 struct mptcp_data_frag *dfrag;
640
641 dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
642 dfrag->data_len = 0;
643 dfrag->data_seq = msk->write_seq;
644 dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
645 dfrag->offset = offset + sizeof(struct mptcp_data_frag);
646 dfrag->page = pfrag->page;
647
648 return dfrag;
649}
650
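/* Transmit helper used both for new data (from the msg iterator) and
 * for MPTCP-level retransmissions (from an rtx fragment): copy the
 * data into a page, push it to the subflow via do_tcp_sendpages() and
 * set up or extend the MPTCP mapping extension on the tail skb.
 */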
Paolo Abeni57040752020-01-21 16:56:27 -0800651static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700652 struct msghdr *msg, struct mptcp_data_frag *dfrag,
653 long *timeo, int *pmss_now,
Paolo Abeni57040752020-01-21 16:56:27 -0800654 int *ps_goal)
655{
Paolo Abeni18b683b2020-03-27 14:48:43 -0700656 int mss_now, avail_size, size_goal, offset, ret, frag_truesize = 0;
657 bool dfrag_collapsed, can_collapse = false;
Mat Martineau6d0060f2020-01-21 16:56:23 -0800658 struct mptcp_sock *msk = mptcp_sk(sk);
659 struct mptcp_ext *mpext = NULL;
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700660 bool retransmission = !!dfrag;
Paolo Abeni57040752020-01-21 16:56:27 -0800661 struct sk_buff *skb, *tail;
Mat Martineau6d0060f2020-01-21 16:56:23 -0800662 struct page_frag *pfrag;
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700663 struct page *page;
664 u64 *write_seq;
Mat Martineau6d0060f2020-01-21 16:56:23 -0800665 size_t psize;
666
667 /* use the mptcp page cache so that we can easily move the data
668 * from one substream to another, but do per subflow memory accounting
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700669	 * Note: pfrag is used only when !retransmission, but the compiler is
670	 * fooled into a warning if we don't init here
Mat Martineau6d0060f2020-01-21 16:56:23 -0800671 */
672 pfrag = sk_page_frag(sk);
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700673 if (!retransmission) {
674 write_seq = &msk->write_seq;
675 page = pfrag->page;
676 } else {
677 write_seq = &dfrag->data_seq;
678 page = dfrag->page;
679 }
Mat Martineau6d0060f2020-01-21 16:56:23 -0800680
681 /* compute copy limit */
682 mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
Paolo Abeni57040752020-01-21 16:56:27 -0800683 *pmss_now = mss_now;
684 *ps_goal = size_goal;
685 avail_size = size_goal;
686 skb = tcp_write_queue_tail(ssk);
687 if (skb) {
688 mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
Mat Martineau6d0060f2020-01-21 16:56:23 -0800689
Paolo Abeni57040752020-01-21 16:56:27 -0800690 /* Limit the write to the size available in the
691		 * current skb, if any, so that we create at most one new skb.
692		 * Explicitly tell TCP internals to avoid collapsing on later
693 * queue management operation, to avoid breaking the ext <->
694 * SSN association set here
695 */
696 can_collapse = (size_goal - skb->len > 0) &&
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700697 mptcp_skb_can_collapse_to(*write_seq, skb, mpext);
Paolo Abeni57040752020-01-21 16:56:27 -0800698 if (!can_collapse)
699 TCP_SKB_CB(skb)->eor = 1;
700 else
701 avail_size = size_goal - skb->len;
702 }
Paolo Abeni18b683b2020-03-27 14:48:43 -0700703
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700704 if (!retransmission) {
705 /* reuse tail pfrag, if possible, or carve a new one from the
706 * page allocator
707 */
708 dfrag = mptcp_rtx_tail(sk);
709 offset = pfrag->offset;
710 dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
711 if (!dfrag_collapsed) {
712 dfrag = mptcp_carve_data_frag(msk, pfrag, offset);
713 offset = dfrag->offset;
714 frag_truesize = dfrag->overhead;
715 }
716 psize = min_t(size_t, pfrag->size - offset, avail_size);
717
718 /* Copy to page */
719 pr_debug("left=%zu", msg_data_left(msg));
720 psize = copy_page_from_iter(pfrag->page, offset,
721 min_t(size_t, msg_data_left(msg),
722 psize),
723 &msg->msg_iter);
724 pr_debug("left=%zu", msg_data_left(msg));
725 if (!psize)
726 return -EINVAL;
727
728 if (!sk_wmem_schedule(sk, psize + dfrag->overhead))
729 return -ENOMEM;
730 } else {
Paolo Abeni18b683b2020-03-27 14:48:43 -0700731 offset = dfrag->offset;
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700732 psize = min_t(size_t, dfrag->data_len, avail_size);
Paolo Abeni18b683b2020-03-27 14:48:43 -0700733 }
Paolo Abenid0272362020-03-27 14:48:45 -0700734
Paolo Abeni57040752020-01-21 16:56:27 -0800735 /* tell the TCP stack to delay the push so that we can safely
736 * access the skb after the sendpages call
Mat Martineau6d0060f2020-01-21 16:56:23 -0800737 */
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700738 ret = do_tcp_sendpages(ssk, page, offset, psize,
Florian Westphal72511aab2020-05-16 10:46:19 +0200739 msg->msg_flags | MSG_SENDPAGE_NOTLAST | MSG_DONTWAIT);
Mat Martineau6d0060f2020-01-21 16:56:23 -0800740 if (ret <= 0)
741 return ret;
Paolo Abeni18b683b2020-03-27 14:48:43 -0700742
743 frag_truesize += ret;
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700744 if (!retransmission) {
745 if (unlikely(ret < psize))
746 iov_iter_revert(&msg->msg_iter, psize - ret);
Mat Martineau6d0060f2020-01-21 16:56:23 -0800747
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700748 /* send successful, keep track of sent data for mptcp-level
749 * retransmission
750 */
751 dfrag->data_len += ret;
752 if (!dfrag_collapsed) {
753 get_page(dfrag->page);
754 list_add_tail(&dfrag->list, &msk->rtx_queue);
755 sk_wmem_queued_add(sk, frag_truesize);
756 } else {
757 sk_wmem_queued_add(sk, ret);
758 }
759
760 /* charge data on mptcp rtx queue to the master socket
761 * Note: we charge such data both to sk and ssk
762 */
763 sk->sk_forward_alloc -= frag_truesize;
Paolo Abeni18b683b2020-03-27 14:48:43 -0700764 }
765
Paolo Abeni57040752020-01-21 16:56:27 -0800766 /* if the tail skb extension is still the cached one, collapsing
767 * really happened. Note: we can't check for 'same skb' as the sk_buff
768 * hdr on tail can be transmitted, freed and re-allocated by the
769 * do_tcp_sendpages() call
770 */
771 tail = tcp_write_queue_tail(ssk);
772 if (mpext && tail && mpext == skb_ext_find(tail, SKB_EXT_MPTCP)) {
773 WARN_ON_ONCE(!can_collapse);
774 mpext->data_len += ret;
775 goto out;
776 }
777
Mat Martineau6d0060f2020-01-21 16:56:23 -0800778 skb = tcp_write_queue_tail(ssk);
779 mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
780 msk->cached_ext = NULL;
781
782 memset(mpext, 0, sizeof(*mpext));
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700783 mpext->data_seq = *write_seq;
Mat Martineau6d0060f2020-01-21 16:56:23 -0800784 mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
785 mpext->data_len = ret;
786 mpext->use_map = 1;
787 mpext->dsn64 = 1;
788
789 pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
790 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
791 mpext->dsn64);
792
Paolo Abeni57040752020-01-21 16:56:27 -0800793out:
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700794 if (!retransmission)
795 pfrag->offset += frag_truesize;
Mat Martineau721e9082020-07-28 15:12:10 -0700796 WRITE_ONCE(*write_seq, *write_seq + ret);
Mat Martineau6d0060f2020-01-21 16:56:23 -0800797 mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
798
Mat Martineau6d0060f2020-01-21 16:56:23 -0800799 return ret;
800}
801
Florian Westphala0e17062020-05-16 10:46:17 +0200802static void mptcp_nospace(struct mptcp_sock *msk, struct socket *sock)
803{
804 clear_bit(MPTCP_SEND_SPACE, &msk->flags);
805 smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
806
807 /* enables sk->write_space() callbacks */
808 set_bit(SOCK_NOSPACE, &sock->flags);
809}
810
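/* Pick the transmit subflow: the first non-backup subflow if any, else
 * a backup one; return NULL (and flag NOSPACE) when the ext cache
 * cannot be refilled or a subflow is out of write memory.
 */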
Peter Krystadf2962342020-03-27 14:48:39 -0700811static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
812{
813 struct mptcp_subflow_context *subflow;
814 struct sock *backup = NULL;
815
816 sock_owned_by_me((const struct sock *)msk);
817
Florian Westphal149f7c72020-05-16 10:46:20 +0200818 if (!mptcp_ext_cache_refill(msk))
819 return NULL;
820
Peter Krystadf2962342020-03-27 14:48:39 -0700821 mptcp_for_each_subflow(msk, subflow) {
822 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
823
824 if (!sk_stream_memory_free(ssk)) {
825 struct socket *sock = ssk->sk_socket;
826
Florian Westphala0e17062020-05-16 10:46:17 +0200827 if (sock)
828 mptcp_nospace(msk, sock);
Peter Krystadf2962342020-03-27 14:48:39 -0700829
830 return NULL;
831 }
832
833 if (subflow->backup) {
834 if (!backup)
835 backup = ssk;
836
837 continue;
838 }
839
840 return ssk;
841 }
842
843 return backup;
844}
845
Florian Westphal1891c4a2020-01-21 16:56:25 -0800846static void ssk_check_wmem(struct mptcp_sock *msk, struct sock *ssk)
847{
848 struct socket *sock;
849
850 if (likely(sk_stream_is_writeable(ssk)))
851 return;
852
853 sock = READ_ONCE(ssk->sk_socket);
Florian Westphala0e17062020-05-16 10:46:17 +0200854 if (sock)
855 mptcp_nospace(msk, sock);
Florian Westphal1891c4a2020-01-21 16:56:25 -0800856}
857
Mat Martineauf870fa02020-01-21 16:56:15 -0800858static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
859{
Paolo Abeni57040752020-01-21 16:56:27 -0800860 int mss_now = 0, size_goal = 0, ret = 0;
Mat Martineauf870fa02020-01-21 16:56:15 -0800861 struct mptcp_sock *msk = mptcp_sk(sk);
Florian Westphal17091702020-05-16 10:46:21 +0200862 struct page_frag *pfrag;
Mat Martineau6d0060f2020-01-21 16:56:23 -0800863 size_t copied = 0;
Peter Krystadcec37a62020-01-21 16:56:18 -0800864 struct sock *ssk;
Florian Westphal72511aab2020-05-16 10:46:19 +0200865 bool tx_ok;
Mat Martineau6d0060f2020-01-21 16:56:23 -0800866 long timeo;
Mat Martineauf870fa02020-01-21 16:56:15 -0800867
868 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
869 return -EOPNOTSUPP;
870
Peter Krystadcec37a62020-01-21 16:56:18 -0800871 lock_sock(sk);
Mat Martineau1954b862020-02-28 15:47:39 -0800872
873 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
874
875 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
876 ret = sk_stream_wait_connect(sk, &timeo);
877 if (ret)
878 goto out;
879 }
880
Florian Westphal17091702020-05-16 10:46:21 +0200881 pfrag = sk_page_frag(sk);
Florian Westphal72511aab2020-05-16 10:46:19 +0200882restart:
Paolo Abeni18b683b2020-03-27 14:48:43 -0700883 mptcp_clean_una(sk);
884
Mat Martineau57baaf22020-07-28 15:12:00 -0700885 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
886 ret = -EPIPE;
887 goto out;
888 }
889
Florian Westphalfb529e62020-05-16 10:46:18 +0200890wait_for_sndbuf:
Peter Krystadec3edaa2020-03-27 14:48:40 -0700891 __mptcp_flush_join_list(msk);
Peter Krystadf2962342020-03-27 14:48:39 -0700892 ssk = mptcp_subflow_get_send(msk);
Florian Westphal17091702020-05-16 10:46:21 +0200893 while (!sk_stream_memory_free(sk) ||
894 !ssk ||
895 !mptcp_page_frag_refill(ssk, pfrag)) {
Florian Westphalfb529e62020-05-16 10:46:18 +0200896 if (ssk) {
897 /* make sure retransmit timer is
898 * running before we wait for memory.
899 *
900 * The retransmit timer might be needed
901 * to make the peer send an up-to-date
902 * MPTCP Ack.
903 */
904 mptcp_set_timeout(sk, ssk);
905 if (!mptcp_timer_pending(sk))
906 mptcp_reset_timer(sk);
907 }
908
Peter Krystadf2962342020-03-27 14:48:39 -0700909 ret = sk_stream_wait_memory(sk, &timeo);
910 if (ret)
911 goto out;
912
Paolo Abeni18b683b2020-03-27 14:48:43 -0700913 mptcp_clean_una(sk);
914
Peter Krystadf2962342020-03-27 14:48:39 -0700915 ssk = mptcp_subflow_get_send(msk);
916 if (list_empty(&msk->conn_list)) {
917 ret = -ENOTCONN;
918 goto out;
919 }
Peter Krystadcec37a62020-01-21 16:56:18 -0800920 }
921
Mat Martineau6d0060f2020-01-21 16:56:23 -0800922 pr_debug("conn_list->subflow=%p", ssk);
Peter Krystadcec37a62020-01-21 16:56:18 -0800923
Mat Martineau6d0060f2020-01-21 16:56:23 -0800924 lock_sock(ssk);
Florian Westphal72511aab2020-05-16 10:46:19 +0200925 tx_ok = msg_data_left(msg);
926 while (tx_ok) {
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -0700927 ret = mptcp_sendmsg_frag(sk, ssk, msg, NULL, &timeo, &mss_now,
Paolo Abeni57040752020-01-21 16:56:27 -0800928 &size_goal);
Florian Westphal72511aab2020-05-16 10:46:19 +0200929 if (ret < 0) {
930 if (ret == -EAGAIN && timeo > 0) {
931 mptcp_set_timeout(sk, ssk);
932 release_sock(ssk);
933 goto restart;
934 }
Mat Martineau6d0060f2020-01-21 16:56:23 -0800935 break;
Florian Westphal72511aab2020-05-16 10:46:19 +0200936 }
Mat Martineau6d0060f2020-01-21 16:56:23 -0800937
938 copied += ret;
Florian Westphalfb529e62020-05-16 10:46:18 +0200939
Florian Westphal72511aab2020-05-16 10:46:19 +0200940 tx_ok = msg_data_left(msg);
941 if (!tx_ok)
942 break;
943
Florian Westphal149f7c72020-05-16 10:46:20 +0200944 if (!sk_stream_memory_free(ssk) ||
Florian Westphal17091702020-05-16 10:46:21 +0200945 !mptcp_page_frag_refill(ssk, pfrag) ||
Florian Westphal149f7c72020-05-16 10:46:20 +0200946 !mptcp_ext_cache_refill(msk)) {
Florian Westphal72511aab2020-05-16 10:46:19 +0200947 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
948 tcp_push(ssk, msg->msg_flags, mss_now,
949 tcp_sk(ssk)->nonagle, size_goal);
950 mptcp_set_timeout(sk, ssk);
951 release_sock(ssk);
952 goto restart;
953 }
954
Florian Westphalfb529e62020-05-16 10:46:18 +0200955 /* memory is charged to mptcp level socket as well, i.e.
956 * if msg is very large, mptcp socket may run out of buffer
957 * space. mptcp_clean_una() will release data that has
958		 * been acked at mptcp level in the meantime, so there is
959 * a good chance we can continue sending data right away.
Florian Westphal72511aab2020-05-16 10:46:19 +0200960 *
961 * Normally, when the tcp subflow can accept more data, then
962 * so can the MPTCP socket. However, we need to cope with
963 * peers that might lag behind in their MPTCP-level
964 * acknowledgements, i.e. data might have been acked at
965 * tcp level only. So, we must also check the MPTCP socket
966 * limits before we send more data.
Florian Westphalfb529e62020-05-16 10:46:18 +0200967 */
968 if (unlikely(!sk_stream_memory_free(sk))) {
969 tcp_push(ssk, msg->msg_flags, mss_now,
970 tcp_sk(ssk)->nonagle, size_goal);
971 mptcp_clean_una(sk);
972 if (!sk_stream_memory_free(sk)) {
973 /* can't send more for now, need to wait for
974 * MPTCP-level ACKs from peer.
975 *
976 * Wakeup will happen via mptcp_clean_una().
977 */
978 mptcp_set_timeout(sk, ssk);
979 release_sock(ssk);
980 goto wait_for_sndbuf;
981 }
982 }
Mat Martineau6d0060f2020-01-21 16:56:23 -0800983 }
984
Paolo Abenib51f9b82020-03-27 14:48:44 -0700985 mptcp_set_timeout(sk, ssk);
Paolo Abeni57040752020-01-21 16:56:27 -0800986 if (copied) {
Mat Martineau6d0060f2020-01-21 16:56:23 -0800987 ret = copied;
Paolo Abeni57040752020-01-21 16:56:27 -0800988 tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
989 size_goal);
Paolo Abenib51f9b82020-03-27 14:48:44 -0700990
991 /* start the timer, if it's not pending */
992 if (!mptcp_timer_pending(sk))
993 mptcp_reset_timer(sk);
Paolo Abeni57040752020-01-21 16:56:27 -0800994 }
Mat Martineau6d0060f2020-01-21 16:56:23 -0800995
Florian Westphal1891c4a2020-01-21 16:56:25 -0800996 ssk_check_wmem(msk, ssk);
Mat Martineau6d0060f2020-01-21 16:56:23 -0800997 release_sock(ssk);
Mat Martineau1954b862020-02-28 15:47:39 -0800998out:
Peter Krystadcec37a62020-01-21 16:56:18 -0800999 release_sock(sk);
1000 return ret;
Mat Martineauf870fa02020-01-21 16:56:15 -08001001}
1002
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001003static void mptcp_wait_data(struct sock *sk, long *timeo)
1004{
1005 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1006 struct mptcp_sock *msk = mptcp_sk(sk);
1007
1008 add_wait_queue(sk_sleep(sk), &wait);
1009 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1010
1011 sk_wait_event(sk, timeo,
1012 test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);
1013
1014 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1015 remove_wait_queue(sk_sleep(sk), &wait);
1016}
1017
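/* Copy up to @len bytes from the msk-level receive queue into @msg,
 * freeing fully consumed skbs and recording the partial offset in the
 * skb control block otherwise.
 */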
Florian Westphal6771bfd2020-02-26 10:14:48 +01001018static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
1019 struct msghdr *msg,
1020 size_t len)
1021{
1022 struct sock *sk = (struct sock *)msk;
1023 struct sk_buff *skb;
1024 int copied = 0;
1025
1026 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1027 u32 offset = MPTCP_SKB_CB(skb)->offset;
1028 u32 data_len = skb->len - offset;
1029 u32 count = min_t(size_t, len - copied, data_len);
1030 int err;
1031
1032 err = skb_copy_datagram_msg(skb, offset, msg, count);
1033 if (unlikely(err < 0)) {
1034 if (!copied)
1035 return err;
1036 break;
1037 }
1038
1039 copied += count;
1040
1041 if (count < data_len) {
1042 MPTCP_SKB_CB(skb)->offset += count;
1043 break;
1044 }
1045
1046 __skb_unlink(skb, &sk->sk_receive_queue);
1047 __kfree_skb(skb);
1048
1049 if (copied >= len)
1050 break;
1051 }
1052
1053 return copied;
1054}
1055
Florian Westphala6b118f2020-06-30 21:24:45 +02001056/* receive buffer autotuning. See tcp_rcv_space_adjust for more information.
1057 *
1058 * Only difference: Use highest rtt estimate of the subflows in use.
1059 */
1060static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
1061{
1062 struct mptcp_subflow_context *subflow;
1063 struct sock *sk = (struct sock *)msk;
1064 u32 time, advmss = 1;
1065 u64 rtt_us, mstamp;
1066
1067 sock_owned_by_me(sk);
1068
1069 if (copied <= 0)
1070 return;
1071
1072 msk->rcvq_space.copied += copied;
1073
1074 mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
1075 time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);
1076
1077 rtt_us = msk->rcvq_space.rtt_us;
1078 if (rtt_us && time < (rtt_us >> 3))
1079 return;
1080
1081 rtt_us = 0;
1082 mptcp_for_each_subflow(msk, subflow) {
1083 const struct tcp_sock *tp;
1084 u64 sf_rtt_us;
1085 u32 sf_advmss;
1086
1087 tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));
1088
1089 sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
1090 sf_advmss = READ_ONCE(tp->advmss);
1091
1092 rtt_us = max(sf_rtt_us, rtt_us);
1093 advmss = max(sf_advmss, advmss);
1094 }
1095
1096 msk->rcvq_space.rtt_us = rtt_us;
1097 if (time < (rtt_us >> 3) || rtt_us == 0)
1098 return;
1099
1100 if (msk->rcvq_space.copied <= msk->rcvq_space.space)
1101 goto new_measure;
1102
1103 if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
1104 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
1105 int rcvmem, rcvbuf;
1106 u64 rcvwin, grow;
1107
1108 rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;
1109
1110 grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);
1111
1112 do_div(grow, msk->rcvq_space.space);
1113 rcvwin += (grow << 1);
1114
1115 rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER);
1116 while (tcp_win_from_space(sk, rcvmem) < advmss)
1117 rcvmem += 128;
1118
1119 do_div(rcvwin, advmss);
1120 rcvbuf = min_t(u64, rcvwin * rcvmem,
1121 sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
1122
1123 if (rcvbuf > sk->sk_rcvbuf) {
1124 u32 window_clamp;
1125
1126 window_clamp = tcp_win_from_space(sk, rcvbuf);
1127 WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
1128
1129 /* Make subflows follow along. If we do not do this, we
1130 * get drops at subflow level if skbs can't be moved to
1131 * the mptcp rx queue fast enough (announced rcv_win can
1132 * exceed ssk->sk_rcvbuf).
1133 */
1134 mptcp_for_each_subflow(msk, subflow) {
1135 struct sock *ssk;
1136
1137 ssk = mptcp_subflow_tcp_sock(subflow);
1138 WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
1139 tcp_sk(ssk)->window_clamp = window_clamp;
1140 }
1141 }
1142 }
1143
1144 msk->rcvq_space.space = msk->rcvq_space.copied;
1145new_measure:
1146 msk->rcvq_space.copied = 0;
1147 msk->rcvq_space.time = mstamp;
1148}
1149
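/* Drain every subflow that has data available into the msk receive
 * queue; return true if any data was moved.
 */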
Florian Westphal6771bfd2020-02-26 10:14:48 +01001150static bool __mptcp_move_skbs(struct mptcp_sock *msk)
1151{
1152 unsigned int moved = 0;
1153 bool done;
1154
1155 do {
1156 struct sock *ssk = mptcp_subflow_recv_lookup(msk);
1157
1158 if (!ssk)
1159 break;
1160
1161 lock_sock(ssk);
1162 done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
1163 release_sock(ssk);
1164 } while (!done);
1165
1166 return moved > 0;
1167}
1168
Mat Martineauf870fa02020-01-21 16:56:15 -08001169static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
1170 int nonblock, int flags, int *addr_len)
1171{
1172 struct mptcp_sock *msk = mptcp_sk(sk);
Peter Krystadcec37a62020-01-21 16:56:18 -08001173 int copied = 0;
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001174 int target;
1175 long timeo;
Mat Martineauf870fa02020-01-21 16:56:15 -08001176
1177 if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
1178 return -EOPNOTSUPP;
1179
Peter Krystadcec37a62020-01-21 16:56:18 -08001180 lock_sock(sk);
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001181 timeo = sock_rcvtimeo(sk, nonblock);
1182
1183 len = min_t(size_t, len, INT_MAX);
1184 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
Peter Krystadec3edaa2020-03-27 14:48:40 -07001185 __mptcp_flush_join_list(msk);
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001186
Florian Westphal6771bfd2020-02-26 10:14:48 +01001187 while (len > (size_t)copied) {
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001188 int bytes_read;
1189
Florian Westphal6771bfd2020-02-26 10:14:48 +01001190 bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied);
1191 if (unlikely(bytes_read < 0)) {
1192 if (!copied)
1193 copied = bytes_read;
1194 goto out_err;
1195 }
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001196
Florian Westphal6771bfd2020-02-26 10:14:48 +01001197 copied += bytes_read;
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001198
Florian Westphal6771bfd2020-02-26 10:14:48 +01001199 if (skb_queue_empty(&sk->sk_receive_queue) &&
1200 __mptcp_move_skbs(msk))
1201 continue;
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001202
1203 /* only the master socket status is relevant here. The exit
1204 * conditions mirror closely tcp_recvmsg()
1205 */
1206 if (copied >= target)
1207 break;
1208
1209 if (copied) {
1210 if (sk->sk_err ||
1211 sk->sk_state == TCP_CLOSE ||
1212 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1213 !timeo ||
1214 signal_pending(current))
1215 break;
1216 } else {
1217 if (sk->sk_err) {
1218 copied = sock_error(sk);
1219 break;
1220 }
1221
Paolo Abeni59698562020-06-10 10:47:41 +02001222 if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
1223 mptcp_check_for_eof(msk);
1224
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001225 if (sk->sk_shutdown & RCV_SHUTDOWN)
1226 break;
1227
1228 if (sk->sk_state == TCP_CLOSE) {
1229 copied = -ENOTCONN;
1230 break;
1231 }
1232
1233 if (!timeo) {
1234 copied = -EAGAIN;
1235 break;
1236 }
1237
1238 if (signal_pending(current)) {
1239 copied = sock_intr_errno(timeo);
1240 break;
1241 }
1242 }
1243
1244 pr_debug("block timeout %ld", timeo);
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001245 mptcp_wait_data(sk, &timeo);
Peter Krystadcec37a62020-01-21 16:56:18 -08001246 }
1247
Florian Westphal6771bfd2020-02-26 10:14:48 +01001248 if (skb_queue_empty(&sk->sk_receive_queue)) {
1249 /* entire backlog drained, clear DATA_READY. */
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001250 clear_bit(MPTCP_DATA_READY, &msk->flags);
1251
Florian Westphal6771bfd2020-02-26 10:14:48 +01001252 /* .. race-breaker: ssk might have gotten new data
1253 * after last __mptcp_move_skbs() returned false.
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001254 */
Florian Westphal6771bfd2020-02-26 10:14:48 +01001255 if (unlikely(__mptcp_move_skbs(msk)))
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001256 set_bit(MPTCP_DATA_READY, &msk->flags);
Florian Westphal6771bfd2020-02-26 10:14:48 +01001257 } else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
1258 /* data to read but mptcp_wait_data() cleared DATA_READY */
1259 set_bit(MPTCP_DATA_READY, &msk->flags);
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001260 }
Florian Westphal6771bfd2020-02-26 10:14:48 +01001261out_err:
Florian Westphala6b118f2020-06-30 21:24:45 +02001262 mptcp_rcv_space_adjust(msk, copied);
1263
Peter Krystadcec37a62020-01-21 16:56:18 -08001264 release_sock(sk);
Peter Krystadcec37a62020-01-21 16:56:18 -08001265 return copied;
1266}
1267
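/* Retransmit timer expired: if all data has been acked just stop the
 * timer, otherwise flag the rtx work and schedule the worker.
 */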
Paolo Abenib51f9b82020-03-27 14:48:44 -07001268static void mptcp_retransmit_handler(struct sock *sk)
1269{
1270 struct mptcp_sock *msk = mptcp_sk(sk);
1271
Mat Martineauc7529392020-07-28 15:12:09 -07001272 if (atomic64_read(&msk->snd_una) == READ_ONCE(msk->write_seq)) {
Paolo Abenib51f9b82020-03-27 14:48:44 -07001273 mptcp_stop_timer(sk);
Paolo Abeni3b1d6212020-03-27 14:48:48 -07001274 } else {
1275 set_bit(MPTCP_WORK_RTX, &msk->flags);
1276 if (schedule_work(&msk->work))
1277 sock_hold(sk);
1278 }
Paolo Abenib51f9b82020-03-27 14:48:44 -07001279}
1280
1281static void mptcp_retransmit_timer(struct timer_list *t)
1282{
1283 struct inet_connection_sock *icsk = from_timer(icsk, t,
1284 icsk_retransmit_timer);
1285 struct sock *sk = &icsk->icsk_inet.sk;
1286
1287 bh_lock_sock(sk);
1288 if (!sock_owned_by_user(sk)) {
1289 mptcp_retransmit_handler(sk);
1290 } else {
1291 /* delegate our work to tcp_release_cb() */
1292 if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED,
1293 &sk->sk_tsq_flags))
1294 sock_hold(sk);
1295 }
1296 bh_unlock_sock(sk);
1297 sock_put(sk);
1298}
1299
Paolo Abeni3b1d6212020-03-27 14:48:48 -07001300/* Find an idle subflow. Return NULL if there is unacked data at tcp
1301 * level.
1302 *
1303 * A backup subflow is returned only if that is the only kind available.
1304 */
1305static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
1306{
1307 struct mptcp_subflow_context *subflow;
1308 struct sock *backup = NULL;
1309
1310 sock_owned_by_me((const struct sock *)msk);
1311
1312 mptcp_for_each_subflow(msk, subflow) {
1313 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1314
1315 /* still data outstanding at TCP level? Don't retransmit. */
1316 if (!tcp_write_queue_empty(ssk))
1317 return NULL;
1318
1319 if (subflow->backup) {
1320 if (!backup)
1321 backup = ssk;
1322 continue;
1323 }
1324
1325 return ssk;
1326 }
1327
1328 return backup;
1329}
1330
Peter Krystadcec37a62020-01-21 16:56:18 -08001331/* subflow sockets can be either outgoing (connect) or incoming
1332 * (accept).
1333 *
1334 * Outgoing subflows use in-kernel sockets.
1335 * Incoming subflows do not have their own 'struct socket' allocated,
1336 * so we need to use tcp_close() after detaching them from the mptcp
1337 * parent socket.
1338 */
1339static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
1340 struct mptcp_subflow_context *subflow,
1341 long timeout)
1342{
1343 struct socket *sock = READ_ONCE(ssk->sk_socket);
1344
1345 list_del(&subflow->node);
1346
1347 if (sock && sock != sk->sk_socket) {
1348 /* outgoing subflow */
1349 sock_release(sock);
1350 } else {
1351 /* incoming subflow */
1352 tcp_close(ssk, timeout);
1353 }
Mat Martineauf870fa02020-01-21 16:56:15 -08001354}
1355
Paolo Abenidc24f8b2020-02-26 12:19:03 +01001356static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
1357{
1358 return 0;
1359}
1360
Florian Westphalb4162682020-07-07 14:40:48 +02001361static void pm_work(struct mptcp_sock *msk)
1362{
1363 struct mptcp_pm_data *pm = &msk->pm;
1364
1365 spin_lock_bh(&msk->pm.lock);
1366
1367 pr_debug("msk=%p status=%x", msk, pm->status);
1368 if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
1369 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
1370 mptcp_pm_nl_add_addr_received(msk);
1371 }
1372 if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
1373 pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
1374 mptcp_pm_nl_fully_established(msk);
1375 }
1376 if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
1377 pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
1378 mptcp_pm_nl_subflow_established(msk);
1379 }
1380
1381 spin_unlock_bh(&msk->pm.lock);
1382}
1383
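/* MPTCP worker: clean acked data, handle DATA_FIN and subflow EOF,
 * run pending path-manager work, move subflow data to the msk receive
 * queue and, when flagged, retransmit the rtx queue head on an idle
 * subflow.
 */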
Paolo Abeni80992012020-02-26 10:14:47 +01001384static void mptcp_worker(struct work_struct *work)
1385{
1386 struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
Paolo Abeni3b1d6212020-03-27 14:48:48 -07001387 struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
Florian Westphal149f7c72020-05-16 10:46:20 +02001388 int orig_len, orig_offset, mss_now = 0, size_goal = 0;
Paolo Abeni3b1d6212020-03-27 14:48:48 -07001389 struct mptcp_data_frag *dfrag;
1390 u64 orig_write_seq;
1391 size_t copied = 0;
1392 struct msghdr msg;
1393 long timeo = 0;
Paolo Abeni80992012020-02-26 10:14:47 +01001394
1395 lock_sock(sk);
Paolo Abeni3b1d6212020-03-27 14:48:48 -07001396 mptcp_clean_una(sk);
Mat Martineau43b54c62020-07-28 15:12:06 -07001397 mptcp_check_data_fin_ack(sk);
Peter Krystadec3edaa2020-03-27 14:48:40 -07001398 __mptcp_flush_join_list(msk);
Florian Westphal6771bfd2020-02-26 10:14:48 +01001399 __mptcp_move_skbs(msk);
Paolo Abeni3b1d6212020-03-27 14:48:48 -07001400
Florian Westphalb4162682020-07-07 14:40:48 +02001401 if (msk->pm.status)
1402 pm_work(msk);
1403
Florian Westphal59832e22020-04-02 13:44:52 +02001404 if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
1405 mptcp_check_for_eof(msk);
1406
Mat Martineau43b54c62020-07-28 15:12:06 -07001407 mptcp_check_data_fin(sk);
1408
Paolo Abeni3b1d6212020-03-27 14:48:48 -07001409 if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
1410 goto unlock;
1411
1412 dfrag = mptcp_rtx_head(sk);
1413 if (!dfrag)
1414 goto unlock;
1415
Florian Westphal149f7c72020-05-16 10:46:20 +02001416 if (!mptcp_ext_cache_refill(msk))
1417 goto reset_unlock;
1418
Paolo Abeni3b1d6212020-03-27 14:48:48 -07001419 ssk = mptcp_subflow_get_retrans(msk);
1420 if (!ssk)
1421 goto reset_unlock;
1422
1423 lock_sock(ssk);
1424
1425 msg.msg_flags = MSG_DONTWAIT;
1426 orig_len = dfrag->data_len;
1427 orig_offset = dfrag->offset;
1428 orig_write_seq = dfrag->data_seq;
1429 while (dfrag->data_len > 0) {
Florian Westphal149f7c72020-05-16 10:46:20 +02001430 int ret = mptcp_sendmsg_frag(sk, ssk, &msg, dfrag, &timeo,
1431 &mss_now, &size_goal);
Paolo Abeni3b1d6212020-03-27 14:48:48 -07001432 if (ret < 0)
1433 break;
1434
Florian Westphalfc518952020-03-27 14:48:50 -07001435 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
Paolo Abeni3b1d6212020-03-27 14:48:48 -07001436 copied += ret;
1437 dfrag->data_len -= ret;
1438 dfrag->offset += ret;
Florian Westphal149f7c72020-05-16 10:46:20 +02001439
1440 if (!mptcp_ext_cache_refill(msk))
1441 break;
Paolo Abeni3b1d6212020-03-27 14:48:48 -07001442 }
1443 if (copied)
1444 tcp_push(ssk, msg.msg_flags, mss_now, tcp_sk(ssk)->nonagle,
1445 size_goal);
1446
1447 dfrag->data_seq = orig_write_seq;
1448 dfrag->offset = orig_offset;
1449 dfrag->data_len = orig_len;
1450
1451 mptcp_set_timeout(sk, ssk);
1452 release_sock(ssk);
1453
1454reset_unlock:
1455 if (!mptcp_timer_pending(sk))
1456 mptcp_reset_timer(sk);
1457
1458unlock:
Paolo Abeni80992012020-02-26 10:14:47 +01001459 release_sock(sk);
1460 sock_put(sk);
1461}
1462
Matthieu Baerts784325e2020-01-21 16:56:28 -08001463static int __mptcp_init_sock(struct sock *sk)
Mat Martineauf870fa02020-01-21 16:56:15 -08001464{
Peter Krystadcec37a62020-01-21 16:56:18 -08001465 struct mptcp_sock *msk = mptcp_sk(sk);
1466
Peter Krystadec3edaa2020-03-27 14:48:40 -07001467 spin_lock_init(&msk->join_list_lock);
1468
Peter Krystadcec37a62020-01-21 16:56:18 -08001469 INIT_LIST_HEAD(&msk->conn_list);
Peter Krystadec3edaa2020-03-27 14:48:40 -07001470 INIT_LIST_HEAD(&msk->join_list);
Paolo Abeni18b683b2020-03-27 14:48:43 -07001471 INIT_LIST_HEAD(&msk->rtx_queue);
Florian Westphal1891c4a2020-01-21 16:56:25 -08001472 __set_bit(MPTCP_SEND_SPACE, &msk->flags);
Paolo Abeni80992012020-02-26 10:14:47 +01001473 INIT_WORK(&msk->work, mptcp_worker);
Peter Krystadcec37a62020-01-21 16:56:18 -08001474
Paolo Abeni8ab183d2020-01-21 16:56:33 -08001475 msk->first = NULL;
Paolo Abenidc24f8b2020-02-26 12:19:03 +01001476 inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
Paolo Abeni8ab183d2020-01-21 16:56:33 -08001477
Peter Krystad1b1c7a02020-03-27 14:48:38 -07001478 mptcp_pm_data_init(msk);
1479
Paolo Abenib51f9b82020-03-27 14:48:44 -07001480 /* re-use the csk retrans timer for MPTCP-level retrans */
1481 timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
1482
Mat Martineauf870fa02020-01-21 16:56:15 -08001483 return 0;
1484}
1485
Matthieu Baerts784325e2020-01-21 16:56:28 -08001486static int mptcp_init_sock(struct sock *sk)
1487{
Florian Westphalfc518952020-03-27 14:48:50 -07001488 struct net *net = sock_net(sk);
1489 int ret;
Paolo Abeni18b683b2020-03-27 14:48:43 -07001490
Florian Westphalfc518952020-03-27 14:48:50 -07001491 if (!mptcp_is_enabled(net))
1492 return -ENOPROTOOPT;
1493
1494 if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
1495 return -ENOMEM;
1496
1497 ret = __mptcp_init_sock(sk);
Paolo Abeni18b683b2020-03-27 14:48:43 -07001498 if (ret)
1499 return ret;
1500
Paolo Abenifa680182020-06-29 22:26:23 +02001501 ret = __mptcp_socket_create(mptcp_sk(sk));
1502 if (ret)
1503 return ret;
1504
Paolo Abenid0272362020-03-27 14:48:45 -07001505 sk_sockets_allocated_inc(sk);
Florian Westphala6b118f2020-06-30 21:24:45 +02001506 sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
Paolo Abeni3f8e0aa2020-03-27 14:48:47 -07001507 sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2];
Paolo Abenid0272362020-03-27 14:48:45 -07001508
Paolo Abeni18b683b2020-03-27 14:48:43 -07001509 return 0;
1510}
1511
1512static void __mptcp_clear_xmit(struct sock *sk)
1513{
1514 struct mptcp_sock *msk = mptcp_sk(sk);
1515 struct mptcp_data_frag *dtmp, *dfrag;
1516
Paolo Abenib51f9b82020-03-27 14:48:44 -07001517 sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
1518
Paolo Abeni18b683b2020-03-27 14:48:43 -07001519 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
Paolo Abenid0272362020-03-27 14:48:45 -07001520 dfrag_clear(sk, dfrag);
Matthieu Baerts784325e2020-01-21 16:56:28 -08001521}
1522
Paolo Abeni80992012020-02-26 10:14:47 +01001523static void mptcp_cancel_work(struct sock *sk)
1524{
1525 struct mptcp_sock *msk = mptcp_sk(sk);
1526
1527 if (cancel_work_sync(&msk->work))
1528 sock_put(sk);
1529}
1530
Mat Martineau43b54c62020-07-28 15:12:06 -07001531static void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
Peter Krystad21498492020-01-21 16:56:21 -08001532{
1533 lock_sock(ssk);
1534
1535 switch (ssk->sk_state) {
1536 case TCP_LISTEN:
1537 if (!(how & RCV_SHUTDOWN))
1538 break;
1539 /* fall through */
1540 case TCP_SYN_SENT:
1541 tcp_disconnect(ssk, O_NONBLOCK);
1542 break;
1543 default:
Mat Martineau43b54c62020-07-28 15:12:06 -07001544 if (__mptcp_check_fallback(mptcp_sk(sk))) {
1545 pr_debug("Fallback");
1546 ssk->sk_shutdown |= how;
1547 tcp_shutdown(ssk, how);
1548 } else {
1549 pr_debug("Sending DATA_FIN on subflow %p", ssk);
1550 mptcp_set_timeout(sk, ssk);
1551 tcp_send_ack(ssk);
1552 }
Peter Krystad21498492020-01-21 16:56:21 -08001553 break;
1554 }
1555
Peter Krystad21498492020-01-21 16:56:21 -08001556 release_sock(ssk);
1557}
1558
Mat Martineau6920b852020-07-28 15:12:04 -07001559static const unsigned char new_state[16] = {
1560 /* current state: new state: action: */
1561 [0 /* (Invalid) */] = TCP_CLOSE,
1562 [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1563 [TCP_SYN_SENT] = TCP_CLOSE,
1564 [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1565 [TCP_FIN_WAIT1] = TCP_FIN_WAIT1,
1566 [TCP_FIN_WAIT2] = TCP_FIN_WAIT2,
1567 [TCP_TIME_WAIT] = TCP_CLOSE, /* should not happen ! */
1568 [TCP_CLOSE] = TCP_CLOSE,
1569 [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN,
1570 [TCP_LAST_ACK] = TCP_LAST_ACK,
1571 [TCP_LISTEN] = TCP_CLOSE,
1572 [TCP_CLOSING] = TCP_CLOSING,
1573 [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */
1574};
1575
1576static int mptcp_close_state(struct sock *sk)
1577{
1578 int next = (int)new_state[sk->sk_state];
1579 int ns = next & TCP_STATE_MASK;
1580
1581 inet_sk_state_store(sk, ns);
1582
1583 return next & TCP_ACTION_FIN;
1584}
1585
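/* close the MPTCP-level socket: emit the DATA_FIN and shut down every subflow
 * when needed, then detach and close all subflows and release the msk itself.
 */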
Florian Westphal2c22c062020-02-04 18:12:30 +01001586static void mptcp_close(struct sock *sk, long timeout)
Mat Martineauf870fa02020-01-21 16:56:15 -08001587{
Peter Krystadcec37a62020-01-21 16:56:18 -08001588 struct mptcp_subflow_context *subflow, *tmp;
Mat Martineauf870fa02020-01-21 16:56:15 -08001589 struct mptcp_sock *msk = mptcp_sk(sk);
Florian Westphalb2c5b612020-01-29 15:54:45 +01001590 LIST_HEAD(conn_list);
Mat Martineauf870fa02020-01-21 16:56:15 -08001591
Florian Westphal2c22c062020-02-04 18:12:30 +01001592 lock_sock(sk);
Mat Martineau43b54c62020-07-28 15:12:06 -07001593 sk->sk_shutdown = SHUTDOWN_MASK;
Florian Westphal2c22c062020-02-04 18:12:30 +01001594
Mat Martineau43b54c62020-07-28 15:12:06 -07001595 if (sk->sk_state == TCP_LISTEN) {
1596 inet_sk_state_store(sk, TCP_CLOSE);
1597 goto cleanup;
1598 } else if (sk->sk_state == TCP_CLOSE) {
1599 goto cleanup;
1600 }
1601
1602 if (__mptcp_check_fallback(msk)) {
1603 goto update_state;
1604 } else if (mptcp_close_state(sk)) {
1605 pr_debug("Sending DATA_FIN sk=%p", sk);
1606 WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
1607 WRITE_ONCE(msk->snd_data_fin_enable, 1);
1608
1609 mptcp_for_each_subflow(msk, subflow) {
1610 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
1611
1612 mptcp_subflow_shutdown(sk, tcp_sk, SHUTDOWN_MASK);
1613 }
1614 }
1615
1616 sk_stream_wait_close(sk, timeout);
1617
1618update_state:
Mat Martineauf870fa02020-01-21 16:56:15 -08001619 inet_sk_state_store(sk, TCP_CLOSE);
1620
Mat Martineau43b54c62020-07-28 15:12:06 -07001621cleanup:
Paolo Abeni10f6d462020-05-29 17:43:30 +02001622 /* be sure to always acquire the join list lock, to sync vs
1623 * mptcp_finish_join().
1624 */
1625 spin_lock_bh(&msk->join_list_lock);
1626 list_splice_tail_init(&msk->join_list, &msk->conn_list);
1627 spin_unlock_bh(&msk->join_list_lock);
Florian Westphalb2c5b612020-01-29 15:54:45 +01001628 list_splice_init(&msk->conn_list, &conn_list);
1629
Paolo Abeni18b683b2020-03-27 14:48:43 -07001630 __mptcp_clear_xmit(sk);
1631
Florian Westphalb2c5b612020-01-29 15:54:45 +01001632 release_sock(sk);
1633
1634 list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
Peter Krystadcec37a62020-01-21 16:56:18 -08001635 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
Peter Krystadcec37a62020-01-21 16:56:18 -08001636 __mptcp_close_ssk(sk, ssk, subflow, timeout);
Mat Martineauf870fa02020-01-21 16:56:15 -08001637 }
1638
Paolo Abeni80992012020-02-26 10:14:47 +01001639 mptcp_cancel_work(sk);
1640
Florian Westphal6771bfd2020-02-26 10:14:48 +01001641 __skb_queue_purge(&sk->sk_receive_queue);
1642
Peter Krystadcec37a62020-01-21 16:56:18 -08001643 sk_common_release(sk);
Mat Martineauf870fa02020-01-21 16:56:15 -08001644}
1645
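/* copy the subflow's local and remote addresses/ports to the MPTCP socket, so
 * the two stay in sync after bind()/connect()/accept().
 */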
Peter Krystadcf7da0d2020-01-21 16:56:19 -08001646static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
1647{
1648#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1649 const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
1650 struct ipv6_pinfo *msk6 = inet6_sk(msk);
1651
1652 msk->sk_v6_daddr = ssk->sk_v6_daddr;
1653 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;
1654
1655 if (msk6 && ssk6) {
1656 msk6->saddr = ssk6->saddr;
1657 msk6->flow_label = ssk6->flow_label;
1658 }
1659#endif
1660
1661 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
1662 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
1663 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
1664 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
1665 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
1666 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
1667}
1668
Paolo Abeni18b683b2020-03-27 14:48:43 -07001669static int mptcp_disconnect(struct sock *sk, int flags)
1670{
Florian Westphal42c556f2020-04-29 20:43:20 +02001671 /* Should never be called.
1672 * inet_stream_connect() calls ->disconnect, but that
1673 * refers to the subflow socket, not the mptcp one.
1674 */
1675 WARN_ON_ONCE(1);
1676 return 0;
Paolo Abeni18b683b2020-03-27 14:48:43 -07001677}
1678
Florian Westphalb0519de2020-02-06 00:39:37 +01001679#if IS_ENABLED(CONFIG_MPTCP_IPV6)
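/* the ipv6_pinfo is laid out right after the mptcp_sock inside mptcp6_sock;
 * compute its address from the socket pointer.
 */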
1680static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
1681{
1682 unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);
1683
1684 return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
1685}
1686#endif
1687
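/* clone the MPTCP socket for a passively established connection: the new msk
 * inherits the local key and token from the request socket and, when the peer
 * key is already known, pre-computes the initial MPTCP-level ack sequence.
 */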
Paolo Abenifca5c822020-04-20 16:25:06 +02001688struct sock *mptcp_sk_clone(const struct sock *sk,
Paolo Abenicfde1412020-04-30 15:01:52 +02001689 const struct mptcp_options_received *mp_opt,
Paolo Abenifca5c822020-04-20 16:25:06 +02001690 struct request_sock *req)
Florian Westphalb0519de2020-02-06 00:39:37 +01001691{
Paolo Abeni58b09912020-03-13 16:52:41 +01001692 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
Florian Westphalb0519de2020-02-06 00:39:37 +01001693 struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
Paolo Abeni58b09912020-03-13 16:52:41 +01001694 struct mptcp_sock *msk;
1695 u64 ack_seq;
Florian Westphalb0519de2020-02-06 00:39:37 +01001696
1697 if (!nsk)
1698 return NULL;
1699
1700#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1701 if (nsk->sk_family == AF_INET6)
1702 inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
1703#endif
1704
Paolo Abeni58b09912020-03-13 16:52:41 +01001705 __mptcp_init_sock(nsk);
1706
1707 msk = mptcp_sk(nsk);
1708 msk->local_key = subflow_req->local_key;
1709 msk->token = subflow_req->token;
1710 msk->subflow = NULL;
Paolo Abenib93df082020-07-23 13:02:32 +02001711 WRITE_ONCE(msk->fully_established, false);
Paolo Abeni58b09912020-03-13 16:52:41 +01001712
Paolo Abeni58b09912020-03-13 16:52:41 +01001713 msk->write_seq = subflow_req->idsn + 1;
Paolo Abenicc9d2562020-03-27 14:48:42 -07001714 atomic64_set(&msk->snd_una, msk->write_seq);
Paolo Abenicfde1412020-04-30 15:01:52 +02001715 if (mp_opt->mp_capable) {
Paolo Abeni58b09912020-03-13 16:52:41 +01001716 msk->can_ack = true;
Paolo Abenicfde1412020-04-30 15:01:52 +02001717 msk->remote_key = mp_opt->sndr_key;
Paolo Abeni58b09912020-03-13 16:52:41 +01001718 mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
1719 ack_seq++;
1720 msk->ack_seq = ack_seq;
1721 }
Paolo Abeni7f20d5f2020-03-17 15:53:34 +01001722
Florian Westphal5e200872020-04-20 16:25:04 +02001723 sock_reset_flag(nsk, SOCK_RCU_FREE);
Paolo Abeni7f20d5f2020-03-17 15:53:34 +01001724 /* will be fully established after successful MPC subflow creation */
1725 inet_sk_state_store(nsk, TCP_SYN_RECV);
Paolo Abeni58b09912020-03-13 16:52:41 +01001726 bh_unlock_sock(nsk);
1727
1728 /* keep a single reference */
1729 __sock_put(nsk);
Florian Westphalb0519de2020-02-06 00:39:37 +01001730 return nsk;
1731}
1732
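/* seed the receive-space autotuning state from the given subflow's initial
 * receive window.
 */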
Florian Westphala6b118f2020-06-30 21:24:45 +02001733void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
1734{
1735 const struct tcp_sock *tp = tcp_sk(ssk);
1736
1737 msk->rcvq_space.copied = 0;
1738 msk->rcvq_space.rtt_us = 0;
1739
1740 msk->rcvq_space.time = tp->tcp_mstamp;
1741
1742 /* initial rcv_space offering made to peer */
1743 msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
1744 TCP_INIT_CWND * tp->advmss);
1745 if (msk->rcvq_space.space == 0)
1746 msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
1747}
1748
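/* accept() a connection on the first subflow listener: MP_CAPABLE flows return
 * the owning MPTCP socket, plain TCP fallbacks return the tcp socket itself.
 */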
Peter Krystadcf7da0d2020-01-21 16:56:19 -08001749static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
1750 bool kern)
1751{
1752 struct mptcp_sock *msk = mptcp_sk(sk);
1753 struct socket *listener;
1754 struct sock *newsk;
1755
1756 listener = __mptcp_nmpc_socket(msk);
1757 if (WARN_ON_ONCE(!listener)) {
1758 *err = -EINVAL;
1759 return NULL;
1760 }
1761
1762 pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
1763 newsk = inet_csk_accept(listener->sk, flags, err, kern);
1764 if (!newsk)
1765 return NULL;
1766
1767 pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
Peter Krystadcf7da0d2020-01-21 16:56:19 -08001768 if (sk_is_mptcp(newsk)) {
1769 struct mptcp_subflow_context *subflow;
1770 struct sock *new_mptcp_sock;
1771 struct sock *ssk = newsk;
1772
1773 subflow = mptcp_subflow_ctx(newsk);
Paolo Abeni58b09912020-03-13 16:52:41 +01001774 new_mptcp_sock = subflow->conn;
1775
1776 /* is_mptcp should be false if subflow->conn is missing, see
1777 * subflow_syn_recv_sock()
1778 */
1779 if (WARN_ON_ONCE(!new_mptcp_sock)) {
1780 tcp_sk(newsk)->is_mptcp = 0;
1781 return newsk;
1782 }
1783
1784 /* acquire the 2nd reference for the owning socket */
1785 sock_hold(new_mptcp_sock);
Peter Krystadcf7da0d2020-01-21 16:56:19 -08001786
1787 local_bh_disable();
Paolo Abeni58b09912020-03-13 16:52:41 +01001788 bh_lock_sock(new_mptcp_sock);
Peter Krystadcf7da0d2020-01-21 16:56:19 -08001789 msk = mptcp_sk(new_mptcp_sock);
Paolo Abeni8ab183d2020-01-21 16:56:33 -08001790 msk->first = newsk;
Peter Krystadcf7da0d2020-01-21 16:56:19 -08001791
1792 newsk = new_mptcp_sock;
1793 mptcp_copy_inaddrs(newsk, ssk);
1794 list_add(&subflow->node, &msk->conn_list);
1795
Florian Westphala6b118f2020-06-30 21:24:45 +02001796 mptcp_rcv_space_init(msk, ssk);
Peter Krystadcf7da0d2020-01-21 16:56:19 -08001797 bh_unlock_sock(new_mptcp_sock);
Florian Westphalfc518952020-03-27 14:48:50 -07001798
1799 __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
Peter Krystadcf7da0d2020-01-21 16:56:19 -08001800 local_bh_enable();
Florian Westphalfc518952020-03-27 14:48:50 -07001801 } else {
1802 MPTCP_INC_STATS(sock_net(sk),
1803 MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
Peter Krystadcf7da0d2020-01-21 16:56:19 -08001804 }
1805
1806 return newsk;
1807}
1808
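/* release the msk-level resources: the token, the cached skb extension and the
 * per-protocol socket accounting.
 */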
Peter Krystad79c09492020-01-21 16:56:20 -08001809static void mptcp_destroy(struct sock *sk)
1810{
Florian Westphalc9fd9c52020-01-29 15:54:43 +01001811 struct mptcp_sock *msk = mptcp_sk(sk);
1812
Paolo Abeni2c5ebd02020-06-26 19:30:00 +02001813 mptcp_token_destroy(msk);
Florian Westphalc9fd9c52020-01-29 15:54:43 +01001814 if (msk->cached_ext)
1815 __skb_ext_put(msk->cached_ext);
Paolo Abenid0272362020-03-27 14:48:45 -07001816
1817 sk_sockets_allocated_dec(sk);
Peter Krystad79c09492020-01-21 16:56:20 -08001818}
1819
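/* SOL_SOCKET options: SO_REUSEPORT and SO_REUSEADDR are applied to the first
 * subflow and mirrored on the MPTCP socket, everything else is handled by
 * sock_setsockopt() on the msk itself.
 */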
Florian Westphalfd1452d2020-07-05 01:30:16 +02001820static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
Christoph Hellwiga7b75c52020-07-23 08:09:07 +02001821 sockptr_t optval, unsigned int optlen)
Florian Westphalfd1452d2020-07-05 01:30:16 +02001822{
1823 struct sock *sk = (struct sock *)msk;
1824 struct socket *ssock;
1825 int ret;
1826
1827 switch (optname) {
1828 case SO_REUSEPORT:
1829 case SO_REUSEADDR:
1830 lock_sock(sk);
1831 ssock = __mptcp_nmpc_socket(msk);
1832 if (!ssock) {
1833 release_sock(sk);
1834 return -EINVAL;
1835 }
1836
Christoph Hellwiga7b75c52020-07-23 08:09:07 +02001837 ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen);
Florian Westphalfd1452d2020-07-05 01:30:16 +02001838 if (ret == 0) {
1839 if (optname == SO_REUSEPORT)
1840 sk->sk_reuseport = ssock->sk->sk_reuseport;
1841 else if (optname == SO_REUSEADDR)
1842 sk->sk_reuse = ssock->sk->sk_reuse;
1843 }
1844 release_sock(sk);
1845 return ret;
1846 }
1847
Christoph Hellwiga7b75c52020-07-23 08:09:07 +02001848 return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen);
Florian Westphalfd1452d2020-07-05 01:30:16 +02001849}
1850
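/* SOL_IPV6 options: only IPV6_V6ONLY is handled here, applied to the first
 * subflow and mirrored on the MPTCP socket.
 */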
Florian Westphalc9b95a12020-07-05 01:30:17 +02001851static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
Christoph Hellwiga7b75c52020-07-23 08:09:07 +02001852 sockptr_t optval, unsigned int optlen)
Florian Westphalc9b95a12020-07-05 01:30:17 +02001853{
1854 struct sock *sk = (struct sock *)msk;
1855 int ret = -EOPNOTSUPP;
1856 struct socket *ssock;
1857
1858 switch (optname) {
1859 case IPV6_V6ONLY:
1860 lock_sock(sk);
1861 ssock = __mptcp_nmpc_socket(msk);
1862 if (!ssock) {
1863 release_sock(sk);
1864 return -EINVAL;
1865 }
1866
1867 ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen);
1868 if (ret == 0)
1869 sk->sk_ipv6only = ssock->sk->sk_ipv6only;
1870
1871 release_sock(sk);
1872 break;
1873 }
1874
1875 return ret;
1876}
1877
Peter Krystad717e79c2020-01-21 16:56:22 -08001878static int mptcp_setsockopt(struct sock *sk, int level, int optname,
Christoph Hellwiga7b75c52020-07-23 08:09:07 +02001879 sockptr_t optval, unsigned int optlen)
Peter Krystad717e79c2020-01-21 16:56:22 -08001880{
1881 struct mptcp_sock *msk = mptcp_sk(sk);
Paolo Abeni76660af2020-06-29 22:26:24 +02001882 struct sock *ssk;
Peter Krystad717e79c2020-01-21 16:56:22 -08001883
1884 pr_debug("msk=%p", msk);
1885
Florian Westphal83f0c102020-07-05 01:30:15 +02001886 if (level == SOL_SOCKET)
Florian Westphalfd1452d2020-07-05 01:30:16 +02001887 return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
Florian Westphal83f0c102020-07-05 01:30:15 +02001888
Peter Krystad717e79c2020-01-21 16:56:22 -08001889 /* @@ the meaning of setsockopt() when the socket is connected and
Mat Martineaub6e4a1a2020-02-14 14:14:29 -08001890 * there are multiple subflows is not yet defined. It is up to the
1891 * MPTCP-level socket to configure the subflows until the connection
1892 * is in TCP fallback, when TCP socket options are passed through
1893 * to the one remaining subflow.
Peter Krystad717e79c2020-01-21 16:56:22 -08001894 */
1895 lock_sock(sk);
Paolo Abeni76660af2020-06-29 22:26:24 +02001896 ssk = __mptcp_tcp_fallback(msk);
Florian Westphale1546592020-04-11 21:05:01 +02001897 release_sock(sk);
Paolo Abeni76660af2020-06-29 22:26:24 +02001898 if (ssk)
1899 return tcp_setsockopt(ssk, level, optname, optval, optlen);
Florian Westphal50e741b2020-01-29 15:54:44 +01001900
Florian Westphalc9b95a12020-07-05 01:30:17 +02001901 if (level == SOL_IPV6)
1902 return mptcp_setsockopt_v6(msk, optname, optval, optlen);
1903
Mat Martineaub6e4a1a2020-02-14 14:14:29 -08001904 return -EOPNOTSUPP;
Peter Krystad717e79c2020-01-21 16:56:22 -08001905}
1906
1907static int mptcp_getsockopt(struct sock *sk, int level, int optname,
Florian Westphal50e741b2020-01-29 15:54:44 +01001908 char __user *optval, int __user *option)
Peter Krystad717e79c2020-01-21 16:56:22 -08001909{
1910 struct mptcp_sock *msk = mptcp_sk(sk);
Paolo Abeni76660af2020-06-29 22:26:24 +02001911 struct sock *ssk;
Peter Krystad717e79c2020-01-21 16:56:22 -08001912
1913 pr_debug("msk=%p", msk);
1914
Mat Martineaub6e4a1a2020-02-14 14:14:29 -08001915 /* @@ the meaning of getsockopt() when the socket is connected and
1916 * there are multiple subflows is not yet defined. It is up to the
1917 * MPTCP-level socket to configure the subflows until the connection
1918 * is in TCP fallback, when socket options are passed through
1919 * to the one remaining subflow.
Peter Krystad717e79c2020-01-21 16:56:22 -08001920 */
1921 lock_sock(sk);
Paolo Abeni76660af2020-06-29 22:26:24 +02001922 ssk = __mptcp_tcp_fallback(msk);
Florian Westphale1546592020-04-11 21:05:01 +02001923 release_sock(sk);
Paolo Abeni76660af2020-06-29 22:26:24 +02001924 if (ssk)
1925 return tcp_getsockopt(ssk, level, optname, optval, option);
Florian Westphal50e741b2020-01-29 15:54:44 +01001926
Mat Martineaub6e4a1a2020-02-14 14:14:29 -08001927 return -EOPNOTSUPP;
Peter Krystad717e79c2020-01-21 16:56:22 -08001928}
1929
Paolo Abenib51f9b82020-03-27 14:48:44 -07001930#define MPTCP_DEFERRED_ALL (TCPF_DELACK_TIMER_DEFERRED | \
1931 TCPF_WRITE_TIMER_DEFERRED)
Paolo Abeni14c441b2020-02-26 10:14:52 +01001932
1933/* this is very similar to tcp_release_cb(), but we must handle a
1934 * different set of events
1935 */
1936static void mptcp_release_cb(struct sock *sk)
1937{
1938 unsigned long flags, nflags;
1939
1940 do {
1941 flags = sk->sk_tsq_flags;
1942 if (!(flags & MPTCP_DEFERRED_ALL))
1943 return;
1944 nflags = flags & ~MPTCP_DEFERRED_ALL;
1945 } while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
1946
Paolo Abenib51f9b82020-03-27 14:48:44 -07001947 sock_release_ownership(sk);
1948
Paolo Abeni14c441b2020-02-26 10:14:52 +01001949 if (flags & TCPF_DELACK_TIMER_DEFERRED) {
1950 struct mptcp_sock *msk = mptcp_sk(sk);
1951 struct sock *ssk;
1952
1953 ssk = mptcp_subflow_recv_lookup(msk);
1954 if (!ssk || !schedule_work(&msk->work))
1955 __sock_put(sk);
1956 }
Paolo Abenib51f9b82020-03-27 14:48:44 -07001957
1958 if (flags & TCPF_WRITE_TIMER_DEFERRED) {
1959 mptcp_retransmit_handler(sk);
1960 __sock_put(sk);
1961 }
Paolo Abeni14c441b2020-02-26 10:14:52 +01001962}
1963
Paolo Abeni2c5ebd02020-06-26 19:30:00 +02001964static int mptcp_hash(struct sock *sk)
1965{
1966 /* should never be called,
1967 * we hash the TCP subflows, not the master socket
1968 */
1969 WARN_ON_ONCE(1);
1970 return 0;
1971}
1972
1973static void mptcp_unhash(struct sock *sk)
1974{
1975 /* called from sk_common_release(), but nothing to do here */
1976}
1977
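/* port allocation is delegated to the first subflow socket: the msk itself is
 * never hashed into the TCP bind/port tables.
 */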
Peter Krystadcec37a62020-01-21 16:56:18 -08001978static int mptcp_get_port(struct sock *sk, unsigned short snum)
Mat Martineauf870fa02020-01-21 16:56:15 -08001979{
1980 struct mptcp_sock *msk = mptcp_sk(sk);
Peter Krystadcec37a62020-01-21 16:56:18 -08001981 struct socket *ssock;
Mat Martineauf870fa02020-01-21 16:56:15 -08001982
Peter Krystadcec37a62020-01-21 16:56:18 -08001983 ssock = __mptcp_nmpc_socket(msk);
1984 pr_debug("msk=%p, subflow=%p", msk, ssock);
1985 if (WARN_ON_ONCE(!ssock))
1986 return -EINVAL;
Mat Martineauf870fa02020-01-21 16:56:15 -08001987
Peter Krystadcec37a62020-01-21 16:56:18 -08001988 return inet_csk_get_port(ssock->sk, snum);
1989}
Mat Martineauf870fa02020-01-21 16:56:15 -08001990
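/* called when the MP_CAPABLE handshake completes on the first subflow:
 * propagate keys and initial sequence numbers from the subflow context to the
 * msk and notify the path manager of the new connection.
 */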
Peter Krystadcec37a62020-01-21 16:56:18 -08001991void mptcp_finish_connect(struct sock *ssk)
1992{
1993 struct mptcp_subflow_context *subflow;
1994 struct mptcp_sock *msk;
1995 struct sock *sk;
Mat Martineau6d0060f2020-01-21 16:56:23 -08001996 u64 ack_seq;
Mat Martineauf870fa02020-01-21 16:56:15 -08001997
Peter Krystadcec37a62020-01-21 16:56:18 -08001998 subflow = mptcp_subflow_ctx(ssk);
Peter Krystadcec37a62020-01-21 16:56:18 -08001999 sk = subflow->conn;
2000 msk = mptcp_sk(sk);
2001
Mat Martineau648ef4b2020-01-21 16:56:24 -08002002 pr_debug("msk=%p, token=%u", sk, subflow->token);
2003
Mat Martineau6d0060f2020-01-21 16:56:23 -08002004 mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
2005 ack_seq++;
Mat Martineau648ef4b2020-01-21 16:56:24 -08002006 subflow->map_seq = ack_seq;
2007 subflow->map_subflow_seq = 1;
Mat Martineau6d0060f2020-01-21 16:56:23 -08002008
Peter Krystadcec37a62020-01-21 16:56:18 -08002009 /* the socket is not connected yet, so no msk/subflow ops can access or
2010 * race on the fields below
2011 */
2012 WRITE_ONCE(msk->remote_key, subflow->remote_key);
2013 WRITE_ONCE(msk->local_key, subflow->local_key);
Mat Martineau6d0060f2020-01-21 16:56:23 -08002014 WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
2015 WRITE_ONCE(msk->ack_seq, ack_seq);
Christoph Paaschd22f4982020-01-21 16:56:32 -08002016 WRITE_ONCE(msk->can_ack, 1);
Paolo Abenicc9d2562020-03-27 14:48:42 -07002017 atomic64_set(&msk->snd_una, msk->write_seq);
Peter Krystad1b1c7a02020-03-27 14:48:38 -07002018
2019 mptcp_pm_new_connection(msk, 0);
Florian Westphala6b118f2020-06-30 21:24:45 +02002020
2021 mptcp_rcv_space_init(msk, ssk);
Mat Martineauf870fa02020-01-21 16:56:15 -08002022}
2023
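/* attach a subflow sock to the given socket: share the wait queue and inherit
 * the owning user.
 */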
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002024static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
2025{
2026 write_lock_bh(&sk->sk_callback_lock);
2027 rcu_assign_pointer(sk->sk_wq, &parent->wq);
2028 sk_set_socket(sk, parent);
2029 sk->sk_uid = SOCK_INODE(parent)->i_uid;
2030 write_unlock_bh(&sk->sk_callback_lock);
2031}
2032
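/* add a freshly established MP_JOIN subflow to the msk: only allowed while the
 * msk is fully established and the path manager accepts new subflows; the
 * subflow is linked on the join list and, if needed, grafted to the parent
 * socket.
 */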
Peter Krystadf2962342020-03-27 14:48:39 -07002033bool mptcp_finish_join(struct sock *sk)
2034{
2035 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
2036 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
2037 struct sock *parent = (void *)msk;
2038 struct socket *parent_sock;
Peter Krystadec3edaa2020-03-27 14:48:40 -07002039 bool ret;
Peter Krystadf2962342020-03-27 14:48:39 -07002040
2041 pr_debug("msk=%p, subflow=%p", msk, subflow);
2042
2043 /* mptcp socket already closing? */
Paolo Abenib93df082020-07-23 13:02:32 +02002044 if (!mptcp_is_fully_established(parent))
Peter Krystadf2962342020-03-27 14:48:39 -07002045 return false;
2046
2047 if (!msk->pm.server_side)
2048 return true;
2049
Paolo Abeni10f6d462020-05-29 17:43:30 +02002050 if (!mptcp_pm_allow_new_subflow(msk))
2051 return false;
2052
2053 /* active connections are already on conn_list, and we can't acquire
2054 * msk lock here.
2055 * use the join list lock as a synchronization point and double-check
2056 * msk status to avoid racing with mptcp_close()
2057 */
2058 spin_lock_bh(&msk->join_list_lock);
2059 ret = inet_sk_state_load(parent) == TCP_ESTABLISHED;
2060 if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node)))
2061 list_add_tail(&subflow->node, &msk->join_list);
2062 spin_unlock_bh(&msk->join_list_lock);
2063 if (!ret)
2064 return false;
2065
2066 /* attach to the msk socket only after we are sure it will deal with us
2067 * at close time
2068 */
Peter Krystadf2962342020-03-27 14:48:39 -07002069 parent_sock = READ_ONCE(parent->sk_socket);
2070 if (parent_sock && !sk->sk_socket)
2071 mptcp_sock_graft(sk, parent_sock);
Paolo Abeni10f6d462020-05-29 17:43:30 +02002072 subflow->map_seq = msk->ack_seq;
2073 return true;
Peter Krystadf2962342020-03-27 14:48:39 -07002074}
2075
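/* for wake-up purposes, stream memory is considered available as long as the
 * msk-level MPTCP_SEND_SPACE flag is set.
 */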
Florian Westphal1891c4a2020-01-21 16:56:25 -08002076static bool mptcp_memory_free(const struct sock *sk, int wake)
2077{
2078 struct mptcp_sock *msk = mptcp_sk(sk);
2079
2080 return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true;
2081}
2082
Mat Martineauf870fa02020-01-21 16:56:15 -08002083static struct proto mptcp_prot = {
2084 .name = "MPTCP",
2085 .owner = THIS_MODULE,
2086 .init = mptcp_init_sock,
Paolo Abeni18b683b2020-03-27 14:48:43 -07002087 .disconnect = mptcp_disconnect,
Mat Martineauf870fa02020-01-21 16:56:15 -08002088 .close = mptcp_close,
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002089 .accept = mptcp_accept,
Peter Krystad717e79c2020-01-21 16:56:22 -08002090 .setsockopt = mptcp_setsockopt,
2091 .getsockopt = mptcp_getsockopt,
Mat Martineauf870fa02020-01-21 16:56:15 -08002092 .shutdown = tcp_shutdown,
Peter Krystad79c09492020-01-21 16:56:20 -08002093 .destroy = mptcp_destroy,
Mat Martineauf870fa02020-01-21 16:56:15 -08002094 .sendmsg = mptcp_sendmsg,
2095 .recvmsg = mptcp_recvmsg,
Paolo Abeni14c441b2020-02-26 10:14:52 +01002096 .release_cb = mptcp_release_cb,
Paolo Abeni2c5ebd02020-06-26 19:30:00 +02002097 .hash = mptcp_hash,
2098 .unhash = mptcp_unhash,
Peter Krystadcec37a62020-01-21 16:56:18 -08002099 .get_port = mptcp_get_port,
Paolo Abenid0272362020-03-27 14:48:45 -07002100 .sockets_allocated = &mptcp_sockets_allocated,
2101 .memory_allocated = &tcp_memory_allocated,
2102 .memory_pressure = &tcp_memory_pressure,
Florian Westphal1891c4a2020-01-21 16:56:25 -08002103 .stream_memory_free = mptcp_memory_free,
Paolo Abenid0272362020-03-27 14:48:45 -07002104 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
2105 .sysctl_mem = sysctl_tcp_mem,
Mat Martineauf870fa02020-01-21 16:56:15 -08002106 .obj_size = sizeof(struct mptcp_sock),
Paolo Abeni2c5ebd02020-06-26 19:30:00 +02002107 .slab_flags = SLAB_TYPESAFE_BY_RCU,
Mat Martineauf870fa02020-01-21 16:56:15 -08002108 .no_autobind = true,
2109};
2110
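/* bind() is forwarded to the first subflow socket; on success the bound
 * address is copied back to the MPTCP socket.
 */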
Peter Krystad2303f992020-01-21 16:56:17 -08002111static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2112{
2113 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2114 struct socket *ssock;
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002115 int err;
Peter Krystad2303f992020-01-21 16:56:17 -08002116
2117 lock_sock(sock->sk);
Paolo Abenifa680182020-06-29 22:26:23 +02002118 ssock = __mptcp_nmpc_socket(msk);
2119 if (!ssock) {
2120 err = -EINVAL;
Peter Krystad2303f992020-01-21 16:56:17 -08002121 goto unlock;
2122 }
2123
2124 err = ssock->ops->bind(ssock, uaddr, addr_len);
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002125 if (!err)
2126 mptcp_copy_inaddrs(sock->sk, ssock->sk);
Peter Krystad2303f992020-01-21 16:56:17 -08002127
2128unlock:
2129 release_sock(sock->sk);
2130 return err;
2131}
2132
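/* give up on MPTCP before the first SYN is sent: clear the request_mptcp flag
 * and mark the msk as fallen back to plain TCP.
 */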
Paolo Abeni0235d072020-07-23 13:02:31 +02002133static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
2134 struct mptcp_subflow_context *subflow)
2135{
2136 subflow->request_mptcp = 0;
2137 __mptcp_do_fallback(msk);
2138}
2139
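/* connect() is performed on the first subflow socket; MPTCP can be disabled
 * up-front (early fallback) when MD5 signatures are in use on the socket or
 * token allocation fails.
 */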
Peter Krystad2303f992020-01-21 16:56:17 -08002140static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
2141 int addr_len, int flags)
2142{
2143 struct mptcp_sock *msk = mptcp_sk(sock->sk);
Paolo Abeni2c5ebd02020-06-26 19:30:00 +02002144 struct mptcp_subflow_context *subflow;
Peter Krystad2303f992020-01-21 16:56:17 -08002145 struct socket *ssock;
2146 int err;
2147
2148 lock_sock(sock->sk);
Paolo Abeni41be81a2020-05-29 17:43:29 +02002149 if (sock->state != SS_UNCONNECTED && msk->subflow) {
2150 /* pending connection or invalid state, let the existing subflow
2151 * cope with that
2152 */
2153 ssock = msk->subflow;
2154 goto do_connect;
2155 }
2156
Paolo Abenifa680182020-06-29 22:26:23 +02002157 ssock = __mptcp_nmpc_socket(msk);
2158 if (!ssock) {
2159 err = -EINVAL;
Peter Krystad2303f992020-01-21 16:56:17 -08002160 goto unlock;
2161 }
2162
Paolo Abenifa680182020-06-29 22:26:23 +02002163 mptcp_token_destroy(msk);
2164 inet_sk_state_store(sock->sk, TCP_SYN_SENT);
Paolo Abeni2c5ebd02020-06-26 19:30:00 +02002165 subflow = mptcp_subflow_ctx(ssock->sk);
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002166#ifdef CONFIG_TCP_MD5SIG
2167 /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
2168 * TCP option space.
2169 */
2170 if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
Paolo Abeni0235d072020-07-23 13:02:31 +02002171 mptcp_subflow_early_fallback(msk, subflow);
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002172#endif
Paolo Abeni2c5ebd02020-06-26 19:30:00 +02002173 if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk))
Paolo Abeni0235d072020-07-23 13:02:31 +02002174 mptcp_subflow_early_fallback(msk, subflow);
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002175
Paolo Abeni41be81a2020-05-29 17:43:29 +02002176do_connect:
Peter Krystad2303f992020-01-21 16:56:17 -08002177 err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
Paolo Abeni41be81a2020-05-29 17:43:29 +02002178 sock->state = ssock->state;
2179
2180 /* on successful connect, the msk state will be moved to established by
2181 * subflow_finish_connect()
2182 */
Matthieu Baerts367fe042020-07-27 12:24:33 +02002183 if (!err || err == -EINPROGRESS)
Paolo Abeni41be81a2020-05-29 17:43:29 +02002184 mptcp_copy_inaddrs(sock->sk, ssock->sk);
2185 else
2186 inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
Peter Krystad2303f992020-01-21 16:56:17 -08002187
2188unlock:
2189 release_sock(sock->sk);
2190 return err;
2191}
2192
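/* listen() is performed on the first subflow socket, with the msk state kept
 * in sync with the subflow one.
 */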
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002193static int mptcp_listen(struct socket *sock, int backlog)
2194{
2195 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2196 struct socket *ssock;
2197 int err;
2198
2199 pr_debug("msk=%p", msk);
2200
2201 lock_sock(sock->sk);
Paolo Abenifa680182020-06-29 22:26:23 +02002202 ssock = __mptcp_nmpc_socket(msk);
2203 if (!ssock) {
2204 err = -EINVAL;
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002205 goto unlock;
2206 }
2207
Paolo Abenifa680182020-06-29 22:26:23 +02002208 mptcp_token_destroy(msk);
2209 inet_sk_state_store(sock->sk, TCP_LISTEN);
Florian Westphal5e200872020-04-20 16:25:04 +02002210 sock_set_flag(sock->sk, SOCK_RCU_FREE);
2211
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002212 err = ssock->ops->listen(ssock, backlog);
2213 inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
2214 if (!err)
2215 mptcp_copy_inaddrs(sock->sk, ssock->sk);
2216
2217unlock:
2218 release_sock(sock->sk);
2219 return err;
2220}
2221
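/* accept() at the struct socket level: the subflow-level accept ends up
 * invoking mptcp_accept() above; here the new subflows are grafted to the
 * freshly created socket and the DATA_READY flag is refreshed.
 */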
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002222static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
2223 int flags, bool kern)
2224{
2225 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2226 struct socket *ssock;
2227 int err;
2228
2229 pr_debug("msk=%p", msk);
2230
2231 lock_sock(sock->sk);
2232 if (sock->sk->sk_state != TCP_LISTEN)
2233 goto unlock_fail;
2234
2235 ssock = __mptcp_nmpc_socket(msk);
2236 if (!ssock)
2237 goto unlock_fail;
2238
Paolo Abeni8a056612020-06-29 22:26:25 +02002239 clear_bit(MPTCP_DATA_READY, &msk->flags);
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002240 sock_hold(ssock->sk);
2241 release_sock(sock->sk);
2242
2243 err = ssock->ops->accept(sock, newsock, flags, kern);
Paolo Abenid2f77c52020-06-29 22:26:22 +02002244 if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) {
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002245 struct mptcp_sock *msk = mptcp_sk(newsock->sk);
2246 struct mptcp_subflow_context *subflow;
2247
2248 /* set ssk->sk_socket of accept()ed flows to the mptcp socket.
2249 * This is needed so the NOSPACE flag can be set from the tcp stack.
2250 */
Peter Krystadec3edaa2020-03-27 14:48:40 -07002251 __mptcp_flush_join_list(msk);
Geliang Tang190f8b02020-08-03 21:00:44 +08002252 mptcp_for_each_subflow(msk, subflow) {
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002253 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2254
2255 if (!ssk->sk_socket)
2256 mptcp_sock_graft(ssk, newsock);
2257 }
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002258 }
2259
Paolo Abeni8a056612020-06-29 22:26:25 +02002260 if (inet_csk_listen_poll(ssock->sk))
2261 set_bit(MPTCP_DATA_READY, &msk->flags);
Peter Krystadcf7da0d2020-01-21 16:56:19 -08002262 sock_put(ssock->sk);
2263 return err;
2264
2265unlock_fail:
2266 release_sock(sock->sk);
2267 return -EINVAL;
2268}
2269
Paolo Abeni8a056612020-06-29 22:26:25 +02002270static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
2271{
2272 return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM :
2273 0;
2274}
2275
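/* poll() on the MPTCP socket: readable and writable state is tracked at the
 * msk level via the MPTCP_DATA_READY and MPTCP_SEND_SPACE flags.
 */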
Peter Krystad2303f992020-01-21 16:56:17 -08002276static __poll_t mptcp_poll(struct file *file, struct socket *sock,
2277 struct poll_table_struct *wait)
2278{
Florian Westphal1891c4a2020-01-21 16:56:25 -08002279 struct sock *sk = sock->sk;
Paolo Abeni8ab183d2020-01-21 16:56:33 -08002280 struct mptcp_sock *msk;
Peter Krystad2303f992020-01-21 16:56:17 -08002281 __poll_t mask = 0;
Paolo Abeni8a056612020-06-29 22:26:25 +02002282 int state;
Peter Krystad2303f992020-01-21 16:56:17 -08002283
Florian Westphal1891c4a2020-01-21 16:56:25 -08002284 msk = mptcp_sk(sk);
Florian Westphal1891c4a2020-01-21 16:56:25 -08002285 sock_poll_wait(file, sock, wait);
Florian Westphal1891c4a2020-01-21 16:56:25 -08002286
Paolo Abeni8a056612020-06-29 22:26:25 +02002287 state = inet_sk_state_load(sk);
2288 if (state == TCP_LISTEN)
2289 return mptcp_check_readable(msk);
2290
2291 if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
2292 mask |= mptcp_check_readable(msk);
2293 if (sk_stream_is_writeable(sk) &&
2294 test_bit(MPTCP_SEND_SPACE, &msk->flags))
2295 mask |= EPOLLOUT | EPOLLWRNORM;
2296 }
Florian Westphal1891c4a2020-01-21 16:56:25 -08002297 if (sk->sk_shutdown & RCV_SHUTDOWN)
2298 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2299
Peter Krystad2303f992020-01-21 16:56:17 -08002300 return mask;
2301}
2302
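/* shutdown() on the MPTCP socket: after fallback simply shut down every
 * subflow, otherwise emit the DATA_FIN (via mptcp_close_state()) and propagate
 * the shutdown to all established subflows.
 */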
Peter Krystad21498492020-01-21 16:56:21 -08002303static int mptcp_shutdown(struct socket *sock, int how)
2304{
2305 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2306 struct mptcp_subflow_context *subflow;
2307 int ret = 0;
2308
2309 pr_debug("sk=%p, how=%d", msk, how);
2310
2311 lock_sock(sock->sk);
Peter Krystad21498492020-01-21 16:56:21 -08002312
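	/* map SHUT_RD/SHUT_WR/SHUT_RDWR (0/1/2) onto the RCV/SEND shutdown bits */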
2313 how++;
Peter Krystad21498492020-01-21 16:56:21 -08002314 if ((how & ~SHUTDOWN_MASK) || !how) {
2315 ret = -EINVAL;
2316 goto out_unlock;
2317 }
2318
2319 if (sock->state == SS_CONNECTING) {
2320 if ((1 << sock->sk->sk_state) &
2321 (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
2322 sock->state = SS_DISCONNECTING;
2323 else
2324 sock->state = SS_CONNECTED;
2325 }
2326
Mat Martineau43b54c62020-07-28 15:12:06 -07002327 /* If we've already sent a FIN, or it's a closed state, skip this. */
2328 if (__mptcp_check_fallback(msk)) {
2329 if (how == SHUT_WR || how == SHUT_RDWR)
2330 inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);
Mat Martineau7279da62020-07-28 15:12:02 -07002331
Mat Martineau43b54c62020-07-28 15:12:06 -07002332 mptcp_for_each_subflow(msk, subflow) {
2333 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
Peter Krystad21498492020-01-21 16:56:21 -08002334
Mat Martineau43b54c62020-07-28 15:12:06 -07002335 mptcp_subflow_shutdown(sock->sk, tcp_sk, how);
2336 }
2337 } else if ((how & SEND_SHUTDOWN) &&
2338 ((1 << sock->sk->sk_state) &
2339 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2340 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) &&
2341 mptcp_close_state(sock->sk)) {
2342 __mptcp_flush_join_list(msk);
2343
2344 WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
2345 WRITE_ONCE(msk->snd_data_fin_enable, 1);
2346
2347 mptcp_for_each_subflow(msk, subflow) {
2348 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2349
2350 mptcp_subflow_shutdown(sock->sk, tcp_sk, how);
2351 }
Peter Krystad21498492020-01-21 16:56:21 -08002352 }
2353
Davide Carattie1ff9e82020-06-29 22:26:20 +02002354 /* Wake up anyone sleeping in poll. */
2355 sock->sk->sk_state_change(sock->sk);
2356
Peter Krystad21498492020-01-21 16:56:21 -08002357out_unlock:
2358 release_sock(sock->sk);
2359
2360 return ret;
2361}
2362
Florian Westphale42f1ac2020-01-24 16:04:02 -08002363static const struct proto_ops mptcp_stream_ops = {
2364 .family = PF_INET,
2365 .owner = THIS_MODULE,
2366 .release = inet_release,
2367 .bind = mptcp_bind,
2368 .connect = mptcp_stream_connect,
2369 .socketpair = sock_no_socketpair,
2370 .accept = mptcp_stream_accept,
Paolo Abenid2f77c52020-06-29 22:26:22 +02002371 .getname = inet_getname,
Florian Westphale42f1ac2020-01-24 16:04:02 -08002372 .poll = mptcp_poll,
2373 .ioctl = inet_ioctl,
2374 .gettstamp = sock_gettstamp,
2375 .listen = mptcp_listen,
2376 .shutdown = mptcp_shutdown,
2377 .setsockopt = sock_common_setsockopt,
2378 .getsockopt = sock_common_getsockopt,
2379 .sendmsg = inet_sendmsg,
2380 .recvmsg = inet_recvmsg,
2381 .mmap = sock_no_mmap,
2382 .sendpage = inet_sendpage,
Florian Westphale42f1ac2020-01-24 16:04:02 -08002383};
Peter Krystad2303f992020-01-21 16:56:17 -08002384
Mat Martineauf870fa02020-01-21 16:56:15 -08002385static struct inet_protosw mptcp_protosw = {
2386 .type = SOCK_STREAM,
2387 .protocol = IPPROTO_MPTCP,
2388 .prot = &mptcp_prot,
Peter Krystad2303f992020-01-21 16:56:17 -08002389 .ops = &mptcp_stream_ops,
2390 .flags = INET_PROTOSW_ICSK,
Mat Martineauf870fa02020-01-21 16:56:15 -08002391};
2392
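/* register the MPTCP protocol: after this, userspace can create MPTCP sockets
 * with e.g.
 *
 *	socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *
 * which resolves to mptcp_protosw above.
 */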
Paolo Abenid39dcec2020-06-26 19:29:59 +02002393void __init mptcp_proto_init(void)
Mat Martineauf870fa02020-01-21 16:56:15 -08002394{
Peter Krystad2303f992020-01-21 16:56:17 -08002395 mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
Peter Krystad2303f992020-01-21 16:56:17 -08002396
Paolo Abenid0272362020-03-27 14:48:45 -07002397 if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
2398 panic("Failed to allocate MPTCP pcpu counter\n");
2399
Peter Krystad2303f992020-01-21 16:56:17 -08002400 mptcp_subflow_init();
Peter Krystad1b1c7a02020-03-27 14:48:38 -07002401 mptcp_pm_init();
Paolo Abeni2c5ebd02020-06-26 19:30:00 +02002402 mptcp_token_init();
Peter Krystad2303f992020-01-21 16:56:17 -08002403
Mat Martineauf870fa02020-01-21 16:56:15 -08002404 if (proto_register(&mptcp_prot, 1) != 0)
2405 panic("Failed to register MPTCP proto.\n");
2406
2407 inet_register_protosw(&mptcp_protosw);
Florian Westphal6771bfd2020-02-26 10:14:48 +01002408
2409 BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
Mat Martineauf870fa02020-01-21 16:56:15 -08002410}
2411
2412#if IS_ENABLED(CONFIG_MPTCP_IPV6)
Florian Westphale42f1ac2020-01-24 16:04:02 -08002413static const struct proto_ops mptcp_v6_stream_ops = {
2414 .family = PF_INET6,
2415 .owner = THIS_MODULE,
2416 .release = inet6_release,
2417 .bind = mptcp_bind,
2418 .connect = mptcp_stream_connect,
2419 .socketpair = sock_no_socketpair,
2420 .accept = mptcp_stream_accept,
Paolo Abenid2f77c52020-06-29 22:26:22 +02002421 .getname = inet6_getname,
Florian Westphale42f1ac2020-01-24 16:04:02 -08002422 .poll = mptcp_poll,
2423 .ioctl = inet6_ioctl,
2424 .gettstamp = sock_gettstamp,
2425 .listen = mptcp_listen,
2426 .shutdown = mptcp_shutdown,
2427 .setsockopt = sock_common_setsockopt,
2428 .getsockopt = sock_common_getsockopt,
2429 .sendmsg = inet6_sendmsg,
2430 .recvmsg = inet6_recvmsg,
2431 .mmap = sock_no_mmap,
2432 .sendpage = inet_sendpage,
2433#ifdef CONFIG_COMPAT
Christoph Hellwig39869122020-05-18 08:28:06 +02002434 .compat_ioctl = inet6_compat_ioctl,
Florian Westphale42f1ac2020-01-24 16:04:02 -08002435#endif
2436};
2437
Mat Martineauf870fa02020-01-21 16:56:15 -08002438static struct proto mptcp_v6_prot;
2439
Peter Krystad79c09492020-01-21 16:56:20 -08002440static void mptcp_v6_destroy(struct sock *sk)
2441{
2442 mptcp_destroy(sk);
2443 inet6_destroy_sock(sk);
2444}
2445
Mat Martineauf870fa02020-01-21 16:56:15 -08002446static struct inet_protosw mptcp_v6_protosw = {
2447 .type = SOCK_STREAM,
2448 .protocol = IPPROTO_MPTCP,
2449 .prot = &mptcp_v6_prot,
Peter Krystad2303f992020-01-21 16:56:17 -08002450 .ops = &mptcp_v6_stream_ops,
Mat Martineauf870fa02020-01-21 16:56:15 -08002451 .flags = INET_PROTOSW_ICSK,
2452};
2453
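/* same as above for AF_INET6: the v6 proto reuses mptcp_prot with a
 * v6-specific name, destroy hook and object size.
 */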
Paolo Abenid39dcec2020-06-26 19:29:59 +02002454int __init mptcp_proto_v6_init(void)
Mat Martineauf870fa02020-01-21 16:56:15 -08002455{
2456 int err;
2457
2458 mptcp_v6_prot = mptcp_prot;
2459 strcpy(mptcp_v6_prot.name, "MPTCPv6");
2460 mptcp_v6_prot.slab = NULL;
Peter Krystad79c09492020-01-21 16:56:20 -08002461 mptcp_v6_prot.destroy = mptcp_v6_destroy;
Florian Westphalb0519de2020-02-06 00:39:37 +01002462 mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
Mat Martineauf870fa02020-01-21 16:56:15 -08002463
2464 err = proto_register(&mptcp_v6_prot, 1);
2465 if (err)
2466 return err;
2467
Mat Martineauf870fa02020-01-21 16:56:15 -08002468 err = inet6_register_protosw(&mptcp_v6_protosw);
2469 if (err)
2470 proto_unregister(&mptcp_v6_prot);
2471
2472 return err;
2473}
2474#endif