// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <net/xfrm.h>
#include "protocol.h"
#include "mib.h"

#define CREATE_TRACE_POINTS
#include <trace/events/mptcp.h>

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

struct mptcp_skb_cb {
	u64 map_seq;
	u64 end_seq;
	u32 offset;
	u8  has_rxtstamp:1;
};

#define MPTCP_SKB_CB(__skb)	((struct mptcp_skb_cb *)&((__skb)->cb[0]))

enum {
	MPTCP_CMSG_TS = BIT(0),
};

static struct percpu_counter mptcp_sockets_allocated;

static void __mptcp_destroy_sock(struct sock *sk);
static void __mptcp_check_send_data_fin(struct sock *sk);

DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
static struct net_device mptcp_napi_dev;

/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || READ_ONCE(msk->can_ack))
		return NULL;

	return msk->subflow;
}

/* Returns end sequence number of the receiver's advertised window */
static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
{
	return READ_ONCE(msk->wnd_end);
}

static bool mptcp_is_tcpsk(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (unlikely(sk->sk_prot == &tcp_prot)) {
		/* we are being invoked after mptcp_accept() has
		 * accepted a non-mp-capable flow: sk is a tcp_sk,
		 * not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
		return true;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
		sock->ops = &inet6_stream_ops;
		return true;
#endif
	}

	return false;
}

static int __mptcp_socket_create(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
		return err;

	msk->first = ssock->sk;
	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	sock_hold(ssock->sk);
	subflow->request_mptcp = 1;
	mptcp_sock_graft(msk->first, sk->sk_socket);

	return 0;
}

static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	__kfree_skb(skb);
}

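/* try to merge "from" into the tail skb "to"; requires "from" to carry no
 * skb offset and the underlying skb_try_coalesce() to succeed. On success,
 * the merged data is accounted to sk and "from" is released.
 */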
static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
			       struct sk_buff *from)
{
	bool fragstolen;
	int delta;

	if (MPTCP_SKB_CB(from)->offset ||
	    !skb_try_coalesce(to, from, &fragstolen, &delta))
		return false;

	pr_debug("coalesced seq %llx into %llx new len %d new end seq %llx",
		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
		 to->len, MPTCP_SKB_CB(from)->end_seq);
	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
	kfree_skb_partial(from, fragstolen);
	atomic_add(delta, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, delta);
	return true;
}

static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
				   struct sk_buff *from)
{
	if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
		return false;

	return mptcp_try_coalesce((struct sock *)msk, to, from);
}

/* "inspired" by tcp_data_queue_ofo(), main differences:
 * - use mptcp seqs
 * - don't cope with sacks
 */
static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)msk;
	struct rb_node **p, *parent;
	u64 seq, end_seq, max_seq;
	struct sk_buff *skb1;

	seq = MPTCP_SKB_CB(skb)->map_seq;
	end_seq = MPTCP_SKB_CB(skb)->end_seq;
	max_seq = READ_ONCE(msk->rcv_wnd_sent);

	pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
	if (after64(end_seq, max_seq)) {
		/* out of window */
		mptcp_drop(sk, skb);
		pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
			 (unsigned long long)end_seq - (unsigned long long)max_seq,
			 (unsigned long long)msk->rcv_wnd_sent);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
		return;
	}

	p = &msk->out_of_order_queue.rb_node;
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
	if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
		rb_link_node(&skb->rbnode, NULL, p);
		rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
		msk->ooo_last_skb = skb;
		goto end;
	}

	/* with 2 subflows, adding at the end of the ooo queue is quite likely.
	 * Use of ooo_last_skb avoids the O(log N) rbtree lookup.
	 */
	if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
		return;
	}

	/* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
	if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
		parent = &msk->ooo_last_skb->rbnode;
		p = &parent->rb_right;
		goto insert;
	}

	/* Find place to insert this segment. Handle overlaps on the way. */
	parent = NULL;
	while (*p) {
		parent = *p;
		skb1 = rb_to_skb(parent);
		if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
			p = &parent->rb_left;
			continue;
		}
		if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
			if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
				/* All the bits are present. Drop. */
				mptcp_drop(sk, skb);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
				return;
			}
			if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
				/* partial overlap:
				 *     |     skb      |
				 *  |     skb1    |
				 * continue traversing
				 */
			} else {
				/* skb's seq == skb1's seq and skb covers skb1.
				 * Replace skb1 with skb.
				 */
				rb_replace_node(&skb1->rbnode, &skb->rbnode,
						&msk->out_of_order_queue);
				mptcp_drop(sk, skb1);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
				goto merge_right;
			}
		} else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
			return;
		}
		p = &parent->rb_right;
	}

insert:
	/* Insert segment into RB tree. */
	rb_link_node(&skb->rbnode, parent, p);
	rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);

merge_right:
	/* Remove other segments covered by skb. */
	while ((skb1 = skb_rb_next(skb)) != NULL) {
		if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
			break;
		rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
		mptcp_drop(sk, skb1);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
	}
	/* If there is no skb after us, we are the last_skb! */
	if (!skb1)
		msk->ooo_last_skb = skb;

end:
	skb_condense(skb);
	skb_set_owner_r(skb, sk);
}

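/* move @skb, holding @copy_len bytes at @offset, from the ssk receive queue
 * into the msk receive queue (if in sequence) or the msk out-of-order queue,
 * converting the TCP sequence space into MPTCP-level data sequence numbers
 */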
static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
			     struct sk_buff *skb, unsigned int offset,
			     size_t copy_len)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *tail;
	bool has_rxtstamp;

	__skb_unlink(skb, &ssk->sk_receive_queue);

	skb_ext_reset(skb);
	skb_orphan(skb);

	/* try to fetch required memory from subflow */
	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT;

		if (ssk->sk_forward_alloc < amount)
			goto drop;

		ssk->sk_forward_alloc -= amount;
		sk->sk_forward_alloc += amount;
	}

	has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;

	/* the skb map_seq accounts for the skb offset:
	 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
	 * value
	 */
	MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
	MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
	MPTCP_SKB_CB(skb)->offset = offset;
	MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp;

	if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
		/* in sequence */
		WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (tail && mptcp_try_coalesce(sk, tail, skb))
			return true;

		skb_set_owner_r(skb, sk);
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		return true;
	} else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
		mptcp_data_queue_ofo(msk, skb);
		return false;
	}

	/* old data, keep it simple and drop the whole pkt; the sender
	 * will retransmit if needed
	 */
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
drop:
	mptcp_drop(sk, skb);
	return false;
}

static void mptcp_stop_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	mptcp_sk(sk)->timer_ival = 0;
}

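/* wake up sleepers on close-related state changes; signal POLL_HUP once both
 * directions are shut down or the socket reached TCP_CLOSE, POLL_IN otherwise
 */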
static void mptcp_close_wake_up(struct sock *sk)
{
	if (sock_flag(sk, SOCK_DEAD))
		return;

	sk->sk_state_change(sk);
	if (sk->sk_shutdown == SHUTDOWN_MASK ||
	    sk->sk_state == TCP_CLOSE)
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	else
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}

static bool mptcp_pending_data_fin_ack(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return !__mptcp_check_fallback(msk) &&
	       ((1 << sk->sk_state) &
		(TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
	       msk->write_seq == READ_ONCE(msk->snd_una);
}

static void mptcp_check_data_fin_ack(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	/* Look for an acknowledged DATA_FIN */
	if (mptcp_pending_data_fin_ack(sk)) {
		WRITE_ONCE(msk->snd_data_fin_enable, 0);

		switch (sk->sk_state) {
		case TCP_FIN_WAIT1:
			inet_sk_state_store(sk, TCP_FIN_WAIT2);
			break;
		case TCP_CLOSING:
		case TCP_LAST_ACK:
			inet_sk_state_store(sk, TCP_CLOSE);
			break;
		}

		mptcp_close_wake_up(sk);
	}
}

static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (READ_ONCE(msk->rcv_data_fin) &&
	    ((1 << sk->sk_state) &
	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);

		if (msk->ack_seq == rcv_data_fin_seq) {
			if (seq)
				*seq = rcv_data_fin_seq;

			return true;
		}
	}

	return false;
}

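/* DATA_FIN retransmissions use an exponential backoff driven by the number of
 * retransmits already performed, capped at TCP_RTO_MAX
 */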
static void mptcp_set_datafin_timeout(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_sk(sk)->timer_ival = min(TCP_RTO_MAX,
				       TCP_RTO_MIN << icsk->icsk_retransmits);
}

static void __mptcp_set_timeout(struct sock *sk, long tout)
{
	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}

static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
{
	const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

	return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
	       inet_csk(ssk)->icsk_timeout - jiffies : 0;
}

static void mptcp_set_timeout(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	long tout = 0;

	mptcp_for_each_subflow(mptcp_sk(sk), subflow)
		tout = max(tout, mptcp_timeout_from_subflow(subflow));
	__mptcp_set_timeout(sk, tout);
}

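/* an ACK can be sent on a subflow only when its TCP-level state allows it:
 * not in SYN_SENT/SYN_RECV/TIME_WAIT/CLOSE/LISTEN
 */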
static bool tcp_can_send_ack(const struct sock *ssk)
{
	return !((1 << inet_sk_state_load(ssk)) &
	       (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
}

void mptcp_subflow_send_ack(struct sock *ssk)
{
	bool slow;

	slow = lock_sock_fast(ssk);
	if (tcp_can_send_ack(ssk))
		tcp_send_ack(ssk);
	unlock_sock_fast(ssk, slow);
}

static void mptcp_send_ack(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(msk, subflow)
		mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
}

static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
{
	bool slow;

	slow = lock_sock_fast(ssk);
	if (tcp_can_send_ack(ssk))
		tcp_cleanup_rbuf(ssk, 1);
	unlock_sock_fast(ssk, slow);
}

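/* a subflow is worth a tcp_cleanup_rbuf() call when an ack is scheduled and
 * either the window grew by at least a full MSS, or the rx queue just
 * emptied with pushed acks still pending
 */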
static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
{
	const struct inet_connection_sock *icsk = inet_csk(ssk);
	u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending);
	const struct tcp_sock *tp = tcp_sk(ssk);

	return (ack_pending & ICSK_ACK_SCHED) &&
		((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) >
		  READ_ONCE(icsk->icsk_ack.rcv_mss)) ||
		 (rx_empty && ack_pending &
			      (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
}

static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
{
	int old_space = READ_ONCE(msk->old_wspace);
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int space = __mptcp_space(sk);
	bool cleanup, rx_empty;

	cleanup = (space > 0) && (space >= (old_space << 1));
	rx_empty = !__mptcp_rmem(sk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
			mptcp_subflow_cleanup_rbuf(ssk);
	}
}

static bool mptcp_check_data_fin(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	u64 rcv_data_fin_seq;
	bool ret = false;

	if (__mptcp_check_fallback(msk))
		return ret;

	/* Need to ack a DATA_FIN received from a peer while this side
	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
	 * msk->rcv_data_fin was set when parsing the incoming options
	 * at the subflow level and the msk lock was not held, so this
	 * is the first opportunity to act on the DATA_FIN and change
	 * the msk state.
	 *
	 * If we are caught up to the sequence number of the incoming
	 * DATA_FIN, send the DATA_ACK now and do state transition. If
	 * not caught up, do nothing and let the recv code send DATA_ACK
	 * when catching up.
	 */

	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
		WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
		WRITE_ONCE(msk->rcv_data_fin, 0);

		sk->sk_shutdown |= RCV_SHUTDOWN;
		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */

		switch (sk->sk_state) {
		case TCP_ESTABLISHED:
			inet_sk_state_store(sk, TCP_CLOSE_WAIT);
			break;
		case TCP_FIN_WAIT1:
			inet_sk_state_store(sk, TCP_CLOSING);
			break;
		case TCP_FIN_WAIT2:
			inet_sk_state_store(sk, TCP_CLOSE);
			break;
		default:
			/* Other states not expected */
			WARN_ON_ONCE(1);
			break;
		}

		ret = true;
		mptcp_send_ack(msk);
		mptcp_close_wake_up(sk);
	}
	return ret;
}

static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					   struct sock *ssk,
					   unsigned int *bytes)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;
	bool more_data_avail;
	struct tcp_sock *tp;
	bool done = false;
	int sk_rbuf;

	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);

		if (unlikely(ssk_rbuf > sk_rbuf)) {
			WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf);
			sk_rbuf = ssk_rbuf;
		}
	}

	pr_debug("msk=%p ssk=%p", msk, ssk);
	tp = tcp_sk(ssk);
	do {
		u32 map_remaining, offset;
		u32 seq = tp->copied_seq;
		struct sk_buff *skb;
		bool fin;

		/* try to move as much data as available */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);

		skb = skb_peek(&ssk->sk_receive_queue);
		if (!skb) {
			/* if no data is found, a racing workqueue/recvmsg
			 * already processed the new data, stop here or we
			 * can enter an infinite loop
			 */
			if (!moved)
				done = true;
			break;
		}

		if (__mptcp_check_fallback(msk)) {
			/* if we are running under the workqueue, TCP could have
			 * collapsed skbs between dummy map creation and now;
			 * be sure to adjust the size
			 */
			map_remaining = skb->len;
			subflow->map_data_len = skb->len;
		}

		offset = seq - TCP_SKB_CB(skb)->seq;
		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
		if (fin) {
			done = true;
			seq++;
		}

		if (offset < skb->len) {
			size_t len = skb->len - offset;

			if (tp->urg_data)
				done = true;

			if (__mptcp_move_skb(msk, ssk, skb, offset, len))
				moved += len;
			seq += len;

			if (WARN_ON_ONCE(map_remaining < len))
				break;
		} else {
			WARN_ON_ONCE(!fin);
			sk_eat_skb(ssk, skb);
			done = true;
		}

		WRITE_ONCE(tp->copied_seq, seq);
		more_data_avail = mptcp_subflow_data_available(ssk);

		if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) {
			done = true;
			break;
		}
	} while (more_data_avail);

	*bytes += moved;
	return done;
}

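/* move as many in-sequence skbs as possible from the out-of-order rbtree to
 * the msk receive queue, trimming any data overlapping what was already
 * received; returns true if at least one skb moved
 */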
static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *skb, *tail;
	bool moved = false;
	struct rb_node *p;
	u64 end_seq;

	p = rb_first(&msk->out_of_order_queue);
	pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
	while (p) {
		skb = rb_to_skb(p);
		if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
			break;

		p = rb_next(p);
		rb_erase(&skb->rbnode, &msk->out_of_order_queue);

		if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq,
				      msk->ack_seq))) {
			mptcp_drop(sk, skb);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
			continue;
		}

		end_seq = MPTCP_SKB_CB(skb)->end_seq;
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;

			/* skip overlapping data, if any */
			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
				 delta);
			MPTCP_SKB_CB(skb)->offset += delta;
			__skb_queue_tail(&sk->sk_receive_queue, skb);
		}
		msk->ack_seq = end_seq;
		moved = true;
	}
	return moved;
}

/* In most cases we will be able to lock the mptcp socket. If it's already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */
static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;

	__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
	__mptcp_ofo_queue(msk);
	if (unlikely(ssk->sk_err)) {
		if (!sock_owned_by_user(sk))
			__mptcp_error_report(sk);
		else
			set_bit(MPTCP_ERROR_REPORT, &msk->flags);
	}

	/* If the moves have caught up with the DATA_FIN sequence number
	 * it's time to ack the DATA_FIN and change socket state, but
	 * this is not a good place to change state. Let the workqueue
	 * do it.
	 */
	if (mptcp_pending_data_fin(sk, NULL))
		mptcp_schedule_work(sk);
	return moved > 0;
}

void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(sk);
	int sk_rbuf, ssk_rbuf;

	/* The peer can send data while we are shutting down this
	 * subflow at msk destruction time, but we must avoid enqueuing
	 * more data to the msk receive queue
	 */
	if (unlikely(subflow->disposable))
		return;

	ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
	if (unlikely(ssk_rbuf > sk_rbuf))
		sk_rbuf = ssk_rbuf;

	/* over limit? Can't append more skbs to msk; also, no need to wake up */
	if (__mptcp_rmem(sk) > sk_rbuf) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
		return;
	}

	/* Wake-up the reader only for in-sequence data */
	mptcp_data_lock(sk);
	if (move_skbs_to_msk(msk, ssk))
		sk->sk_data_ready(sk);

	mptcp_data_unlock(sk);
}

static bool mptcp_do_flush_join_list(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	bool ret = false;

	if (likely(list_empty(&msk->join_list)))
		return false;

	spin_lock_bh(&msk->join_list_lock);
	list_for_each_entry(subflow, &msk->join_list, node) {
		u32 sseq = READ_ONCE(subflow->setsockopt_seq);

		mptcp_propagate_sndbuf((struct sock *)msk, mptcp_subflow_tcp_sock(subflow));
		if (READ_ONCE(msk->setsockopt_seq) != sseq)
			ret = true;
	}
	list_splice_tail_init(&msk->join_list, &msk->conn_list);
	spin_unlock_bh(&msk->join_list_lock);

	return ret;
}

void __mptcp_flush_join_list(struct mptcp_sock *msk)
{
	if (likely(!mptcp_do_flush_join_list(msk)))
		return;

	if (!test_and_set_bit(MPTCP_WORK_SYNC_SETSOCKOPT, &msk->flags))
		mptcp_schedule_work((struct sock *)msk);
}

static void mptcp_flush_join_list(struct mptcp_sock *msk)
{
	bool sync_needed = test_and_clear_bit(MPTCP_WORK_SYNC_SETSOCKOPT, &msk->flags);

	might_sleep();

	if (!mptcp_do_flush_join_list(msk) && !sync_needed)
		return;

	mptcp_sockopt_sync_all(msk);
}

static bool mptcp_timer_pending(struct sock *sk)
{
	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
}

static void mptcp_reset_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned long tout;

	/* prevent rescheduling on close */
	if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE))
		return;

	tout = mptcp_sk(sk)->timer_ival;
	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
}

bool mptcp_schedule_work(struct sock *sk)
{
	if (inet_sk_state_load(sk) != TCP_CLOSE &&
	    schedule_work(&mptcp_sk(sk)->work)) {
		/* each subflow already holds a reference to the sk, and the
		 * workqueue is invoked by a subflow, so sk can't go away here.
		 */
		sock_hold(sk);
		return true;
	}
	return false;
}

void mptcp_subflow_eof(struct sock *sk)
{
	if (!test_and_set_bit(MPTCP_WORK_EOF, &mptcp_sk(sk)->flags))
		mptcp_schedule_work(sk);
}

static void mptcp_check_for_eof(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int receivers = 0;

	mptcp_for_each_subflow(msk, subflow)
		receivers += !subflow->rx_eof;
	if (receivers)
		return;

	if (!(sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* hopefully temporary hack: propagate shutdown status
		 * to msk when all subflows agree on it
		 */
		sk->sk_shutdown |= RCV_SHUTDOWN;

		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
		sk->sk_data_ready(sk);
	}

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		inet_sk_state_store(sk, TCP_CLOSE_WAIT);
		break;
	case TCP_FIN_WAIT1:
		inet_sk_state_store(sk, TCP_CLOSING);
		break;
	case TCP_FIN_WAIT2:
		inet_sk_state_store(sk, TCP_CLOSE);
		break;
	default:
		return;
	}
	mptcp_close_wake_up(sk);
}

static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	sock_owned_by_me(sk);

	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->data_avail))
			return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

static bool mptcp_skb_can_collapse_to(u64 write_seq,
				      const struct sk_buff *skb,
				      const struct mptcp_ext *mpext)
{
	if (!tcp_skb_can_collapse_to(skb))
		return false;

	/* can collapse only if MPTCP level sequence is in order and this
	 * mapping has not been xmitted yet
	 */
	return mpext && mpext->data_seq + mpext->data_len == write_seq &&
	       !mpext->frozen;
}

/* we can append data to the given data frag if:
 * - there is space available in the backing page_frag
 * - the data frag tail matches the current page_frag free offset
 * - the data frag end sequence number matches the current write seq
 */
static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
				       const struct page_frag *pfrag,
				       const struct mptcp_data_frag *df)
{
	return df && pfrag->page == df->page &&
		pfrag->size - pfrag->offset > 0 &&
		pfrag->offset == (df->offset + df->data_len) &&
		df->data_seq + df->data_len == msk->write_seq;
}

static int mptcp_wmem_with_overhead(int size)
{
	return size + ((sizeof(struct mptcp_data_frag) * size) >> PAGE_SHIFT);
}

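/* reserve forward-allocated memory at the msk level for future tx; under
 * memory pressure fall back to reserving a single page, and on failure flag
 * the error so that the next allocation will wait for memory
 */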
static void __mptcp_wmem_reserve(struct sock *sk, int size)
{
	int amount = mptcp_wmem_with_overhead(size);
	struct mptcp_sock *msk = mptcp_sk(sk);

	WARN_ON_ONCE(msk->wmem_reserved);
	if (WARN_ON_ONCE(amount < 0))
		amount = 0;

	if (amount <= sk->sk_forward_alloc)
		goto reserve;

	/* under memory pressure try to reserve at most a single page,
	 * otherwise try to reserve the full estimate and fall back
	 * to a single page before entering the error path
	 */
	if ((tcp_under_memory_pressure(sk) && amount > PAGE_SIZE) ||
	    !sk_wmem_schedule(sk, amount)) {
		if (amount <= PAGE_SIZE)
			goto nomem;

		amount = PAGE_SIZE;
		if (!sk_wmem_schedule(sk, amount))
			goto nomem;
	}

reserve:
	msk->wmem_reserved = amount;
	sk->sk_forward_alloc -= amount;
	return;

nomem:
	/* we will wait for memory on next allocation */
	msk->wmem_reserved = -1;
}

static void __mptcp_update_wmem(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
#endif

	if (!msk->wmem_reserved)
		return;

	if (msk->wmem_reserved < 0)
		msk->wmem_reserved = 0;
	if (msk->wmem_reserved > 0) {
		sk->sk_forward_alloc += msk->wmem_reserved;
		msk->wmem_reserved = 0;
	}
}

static bool mptcp_wmem_alloc(struct sock *sk, int size)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	/* check for pre-existing error condition */
	if (msk->wmem_reserved < 0)
		return false;

	if (msk->wmem_reserved >= size)
		goto account;

	mptcp_data_lock(sk);
	if (!sk_wmem_schedule(sk, size)) {
		mptcp_data_unlock(sk);
		return false;
	}

	sk->sk_forward_alloc -= size;
	msk->wmem_reserved += size;
	mptcp_data_unlock(sk);

account:
	msk->wmem_reserved -= size;
	return true;
}

static void mptcp_wmem_uncharge(struct sock *sk, int size)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (msk->wmem_reserved < 0)
		msk->wmem_reserved = 0;
	msk->wmem_reserved += size;
}

static void __mptcp_mem_reclaim_partial(struct sock *sk)
{
	lockdep_assert_held_once(&sk->sk_lock.slock);
	__mptcp_update_wmem(sk);
	sk_mem_reclaim_partial(sk);
}

static void mptcp_mem_reclaim_partial(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	/* if we are experiencing a transient allocation error,
	 * the forward allocation memory has been already
	 * released
	 */
	if (msk->wmem_reserved < 0)
		return;

	mptcp_data_lock(sk);
	sk->sk_forward_alloc += msk->wmem_reserved;
	sk_mem_reclaim_partial(sk);
	msk->wmem_reserved = sk->sk_forward_alloc;
	sk->sk_forward_alloc = 0;
	mptcp_data_unlock(sk);
}

static void dfrag_uncharge(struct sock *sk, int len)
{
	sk_mem_uncharge(sk, len);
	sk_wmem_queued_add(sk, -len);
}

static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
{
	int len = dfrag->data_len + dfrag->overhead;

	list_del(&dfrag->list);
	dfrag_uncharge(sk, len);
	put_page(dfrag->page);
}

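/* walk the rtx queue dropping the dfrags fully covered by snd_una and
 * trimming partially acked ones; in recovery mode also cope with acks
 * landing beyond the current snd head
 */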
static void __mptcp_clean_una(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;
	bool cleaned = false;
	u64 snd_una;

	/* on fallback we just need to ignore snd_una, as this is really
	 * plain TCP
	 */
	if (__mptcp_check_fallback(msk))
		msk->snd_una = READ_ONCE(msk->snd_nxt);

	snd_una = msk->snd_una;
	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
			break;

		if (unlikely(dfrag == msk->first_pending)) {
			/* in recovery mode can see ack after the current snd head */
			if (WARN_ON_ONCE(!msk->recovery))
				break;

			WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
		}

		dfrag_clear(sk, dfrag);
		cleaned = true;
	}

	dfrag = mptcp_rtx_head(sk);
	if (dfrag && after64(snd_una, dfrag->data_seq)) {
		u64 delta = snd_una - dfrag->data_seq;

		/* prevent wrap around in recovery mode */
		if (unlikely(delta > dfrag->already_sent)) {
			if (WARN_ON_ONCE(!msk->recovery))
				goto out;
			if (WARN_ON_ONCE(delta > dfrag->data_len))
				goto out;
			dfrag->already_sent += delta - dfrag->already_sent;
		}

		dfrag->data_seq += delta;
		dfrag->offset += delta;
		dfrag->data_len -= delta;
		dfrag->already_sent -= delta;

		dfrag_uncharge(sk, delta);
		cleaned = true;
	}

	/* all retransmitted data acked, recovery completed */
	if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt))
		msk->recovery = false;

out:
	if (cleaned && tcp_under_memory_pressure(sk))
		__mptcp_mem_reclaim_partial(sk);

	if (snd_una == READ_ONCE(msk->snd_nxt) && !msk->recovery) {
		if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
			mptcp_stop_timer(sk);
	} else {
		mptcp_reset_timer(sk);
	}
}

static void __mptcp_clean_una_wakeup(struct sock *sk)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
#endif
	__mptcp_clean_una(sk);
	mptcp_write_space(sk);
}

static void mptcp_clean_una_wakeup(struct sock *sk)
{
	mptcp_data_lock(sk);
	__mptcp_clean_una_wakeup(sk);
	mptcp_data_unlock(sk);
}

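/* propagate the memory pressure notification to every subflow: enter TCP
 * memory pressure on the first one and moderate all the send buffers
 */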
static void mptcp_enter_memory_pressure(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool first = true;

	sk_stream_moderate_sndbuf(sk);
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (first)
			tcp_enter_memory_pressure(ssk);
		sk_stream_moderate_sndbuf(ssk);
		first = false;
	}
}

/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
 * data
 */
static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
					pfrag, sk->sk_allocation)))
		return true;

	mptcp_enter_memory_pressure(sk);
	return false;
}

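/* the dfrag descriptor is carved from the same page frag that will hold the
 * data, just before it: dfrag->overhead accounts for the descriptor plus
 * alignment padding, and dfrag->offset points at the data itself
 */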
static struct mptcp_data_frag *
mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
		      int orig_offset)
{
	int offset = ALIGN(orig_offset, sizeof(long));
	struct mptcp_data_frag *dfrag;

	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
	dfrag->data_len = 0;
	dfrag->data_seq = msk->write_seq;
	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
	dfrag->already_sent = 0;
	dfrag->page = pfrag->page;

	return dfrag;
}

struct mptcp_sendmsg_info {
	int mss_now;
	int size_goal;
	u16 limit;
	u16 sent;
	unsigned int flags;
	bool data_lock_held;
};

static int mptcp_check_allowed_size(struct mptcp_sock *msk, u64 data_seq,
				    int avail_size)
{
	u64 window_end = mptcp_wnd_end(msk);

	if (__mptcp_check_fallback(msk))
		return avail_size;

	if (!before64(data_seq + avail_size, window_end)) {
		u64 allowed_size = window_end - data_seq;

		return min_t(unsigned int, allowed_size, avail_size);
	}

	return avail_size;
}

static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp)
{
	struct skb_ext *mpext = __skb_ext_alloc(gfp);

	if (!mpext)
		return false;
	__skb_ext_set(skb, SKB_EXT_MPTCP, mpext);
	return true;
}

static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
	if (likely(skb)) {
		if (likely(__mptcp_add_ext(skb, gfp))) {
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->reserved_tailroom = skb->end - skb->tail;
			return skb;
		}
		__kfree_skb(skb);
	} else {
		mptcp_enter_memory_pressure(sk);
	}
	return NULL;
}

static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
{
	struct sk_buff *skb;

	if (ssk->sk_tx_skb_cache) {
		skb = ssk->sk_tx_skb_cache;
		if (unlikely(!skb_ext_find(skb, SKB_EXT_MPTCP) &&
			     !__mptcp_add_ext(skb, gfp)))
			return false;
		return true;
	}

	skb = __mptcp_do_alloc_tx_skb(sk, gfp);
	if (!skb)
		return false;

	if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
		ssk->sk_tx_skb_cache = skb;
		return true;
	}
	kfree_skb(skb);
	return false;
}

static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
{
	gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;

	if (unlikely(tcp_under_memory_pressure(sk))) {
		if (data_lock_held)
			__mptcp_mem_reclaim_partial(sk);
		else
			mptcp_mem_reclaim_partial(sk);
	}
	return __mptcp_alloc_tx_skb(sk, ssk, gfp);
}

/* note: this always recomputes the csum on the whole skb, even
 * if we just appended a single frag. More status info needed
 */
static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
{
	struct mptcp_ext *mpext = mptcp_get_ext(skb);
	__wsum csum = ~csum_unfold(mpext->csum);
	int offset = skb->len - added;

	mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset));
}

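/* try to push (part of) the given dfrag to the ssk write queue, either
 * collapsing into the tail skb or adding a new DSS mapping; returns the
 * number of bytes queued, 0 when no regular data could be queued (including
 * the zero window probe case), or a negative error code
 */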
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct mptcp_data_frag *dfrag,
			      struct mptcp_sendmsg_info *info)
{
	u64 data_seq = dfrag->data_seq + info->sent;
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool zero_window_probe = false;
	struct mptcp_ext *mpext = NULL;
	struct sk_buff *skb, *tail;
	bool must_collapse = false;
	int size_bias = 0;
	int avail_size;
	size_t ret = 0;

	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);

	/* compute send limit */
	info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
	avail_size = info->size_goal;
	skb = tcp_write_queue_tail(ssk);
	if (skb) {
		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most one new skb.
		 * Explicitly tells TCP internals to avoid collapsing on later
		 * queue management operation, to avoid breaking the ext <->
		 * SSN association set here
		 */
		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
		if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
			TCP_SKB_CB(skb)->eor = 1;
			goto alloc_skb;
		}

		must_collapse = (info->size_goal > skb->len) &&
				(skb_shinfo(skb)->nr_frags < sysctl_max_skb_frags);
		if (must_collapse) {
			size_bias = skb->len;
			avail_size = info->size_goal - skb->len;
		}
	}

alloc_skb:
	if (!must_collapse &&
	    !mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held))
		return 0;

	/* Zero window and all data acked? Probe. */
	avail_size = mptcp_check_allowed_size(msk, data_seq, avail_size);
	if (avail_size == 0) {
		u64 snd_una = READ_ONCE(msk->snd_una);

		if (skb || snd_una != msk->snd_nxt)
			return 0;
		zero_window_probe = true;
		data_seq = snd_una - 1;
		avail_size = 1;
	}

	if (WARN_ON_ONCE(info->sent > info->limit ||
			 info->limit > dfrag->data_len))
		return 0;

	ret = info->limit - info->sent;
	tail = tcp_build_frag(ssk, avail_size + size_bias, info->flags,
			      dfrag->page, dfrag->offset + info->sent, &ret);
	if (!tail) {
		tcp_remove_empty_skb(sk, tcp_write_queue_tail(ssk));
		return -ENOMEM;
	}

	/* if the tail skb is still the cached one, collapsing really happened.
	 */
	if (skb == tail) {
		TCP_SKB_CB(tail)->tcp_flags &= ~TCPHDR_PSH;
		mpext->data_len += ret;
		WARN_ON_ONCE(zero_window_probe);
		goto out;
	}

	mpext = skb_ext_find(tail, SKB_EXT_MPTCP);
	if (WARN_ON_ONCE(!mpext)) {
		/* should never reach here, stream corrupted */
		return -EINVAL;
	}

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = data_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = ret;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

	if (zero_window_probe) {
		mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
		mpext->frozen = 1;
		if (READ_ONCE(msk->csum_enabled))
			mptcp_update_data_checksum(tail, ret);
		tcp_push_pending_frames(ssk);
		return 0;
	}
out:
	if (READ_ONCE(msk->csum_enabled))
		mptcp_update_data_checksum(tail, ret);
	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
	return ret;
}

Paolo Abenid5f49192020-09-14 10:01:17 +02001394#define MPTCP_SEND_BURST_SIZE ((1 << 16) - \
1395 sizeof(struct tcphdr) - \
1396 MAX_TCP_OPTION_SPACE - \
1397 sizeof(struct ipv6hdr) - \
1398 sizeof(struct frag_hdr))
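/* Sizing note (illustrative reading, not from the source): with a 20-byte
 * TCP header, 40 bytes of option space, a 40-byte IPv6 header and an
 * 8-byte fragment header, the burst works out to 65536 - 108 = 65428
 * bytes of payload, i.e. one scheduling burst is sized to fit a single
 * 64 KB GSO packet even in the worst IPv6 + fragment-header case.
 */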
1399
1400struct subflow_send_info {
1401 struct sock *ssk;
1402 u64 ratio;
1403};
1404
Paolo Abeniff5a0b42021-08-13 15:15:45 -07001405void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow)
1406{
1407 if (!subflow->stale)
1408 return;
1409
1410 subflow->stale = 0;
Paolo Abenifc1b4e32021-08-13 15:15:46 -07001411 MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER);
Paolo Abeniff5a0b42021-08-13 15:15:45 -07001412}
1413
1414bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
1415{
1416 if (unlikely(subflow->stale)) {
1417 u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp);
1418
1419 if (subflow->stale_rcv_tstamp == rcv_tstamp)
1420 return false;
1421
1422 mptcp_subflow_set_active(subflow);
1423 }
1424 return __mptcp_subflow_active(subflow);
1425}
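/* Informal reading of the staleness handling above: a subflow marked
 * stale is re-enabled only once its tcp-level rcv_tstamp moves, i.e.
 * the peer has been heard from again since the subflow was declared
 * stale; until then it stays excluded from scheduling decisions.
 */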
1426
Paolo Abeni33d41c92021-08-13 15:15:41 -07001427/* implement the mptcp packet scheduler;
1428 * returns the subflow that will transmit the next DSS;
1429 * additionally, it updates the rtx timeout
1430 */
Paolo Abeni5cf92bb2021-01-20 15:39:11 +01001431static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
Peter Krystadf2962342020-03-27 14:48:39 -07001432{
Paolo Abenid5f49192020-09-14 10:01:17 +02001433 struct subflow_send_info send_info[2];
Peter Krystadf2962342020-03-27 14:48:39 -07001434 struct mptcp_subflow_context *subflow;
Paolo Abeni33d41c92021-08-13 15:15:41 -07001435 struct sock *sk = (struct sock *)msk;
Paolo Abenid5f49192020-09-14 10:01:17 +02001436 int i, nr_active = 0;
1437 struct sock *ssk;
Paolo Abeni33d41c92021-08-13 15:15:41 -07001438 long tout = 0;
Paolo Abenid5f49192020-09-14 10:01:17 +02001439 u64 ratio;
1440 u32 pace;
Peter Krystadf2962342020-03-27 14:48:39 -07001441
Paolo Abeni33d41c92021-08-13 15:15:41 -07001442 sock_owned_by_me(sk);
Peter Krystadf2962342020-03-27 14:48:39 -07001443
Paolo Abenid5f49192020-09-14 10:01:17 +02001444 if (__mptcp_check_fallback(msk)) {
1445 if (!msk->first)
Peter Krystadf2962342020-03-27 14:48:39 -07001446 return NULL;
Paolo Abenid5f49192020-09-14 10:01:17 +02001447 return sk_stream_memory_free(msk->first) ? msk->first : NULL;
Peter Krystadf2962342020-03-27 14:48:39 -07001448 }
1449
Paolo Abenid5f49192020-09-14 10:01:17 +02001450	/* re-use the last subflow, if the burst allows it */
1451 if (msk->last_snd && msk->snd_burst > 0 &&
1452 sk_stream_memory_free(msk->last_snd) &&
Paolo Abeni33d41c92021-08-13 15:15:41 -07001453 mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) {
1454 mptcp_set_timeout(sk);
Paolo Abenid5f49192020-09-14 10:01:17 +02001455 return msk->last_snd;
Paolo Abeni33d41c92021-08-13 15:15:41 -07001456 }
Paolo Abenid5f49192020-09-14 10:01:17 +02001457
1458	/* pick the subflow with the lowest wmem/wspace ratio */
1459 for (i = 0; i < 2; ++i) {
1460 send_info[i].ssk = NULL;
1461 send_info[i].ratio = -1;
1462 }
1463 mptcp_for_each_subflow(msk, subflow) {
Geliang Tange10a9892021-04-16 15:38:04 -07001464 trace_mptcp_subflow_get_send(subflow);
Paolo Abenid5f49192020-09-14 10:01:17 +02001465 ssk = mptcp_subflow_tcp_sock(subflow);
1466 if (!mptcp_subflow_active(subflow))
1467 continue;
1468
Paolo Abeni33d41c92021-08-13 15:15:41 -07001469 tout = max(tout, mptcp_timeout_from_subflow(subflow));
Paolo Abenid5f49192020-09-14 10:01:17 +02001470 nr_active += !subflow->backup;
Paolo Abeniec369c32021-01-20 15:39:12 +01001471 if (!sk_stream_memory_free(subflow->tcp_sock) || !tcp_sk(ssk)->snd_wnd)
Paolo Abenid5f49192020-09-14 10:01:17 +02001472 continue;
1473
1474 pace = READ_ONCE(ssk->sk_pacing_rate);
1475 if (!pace)
1476 continue;
1477
1478 ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32,
1479 pace);
1480 if (ratio < send_info[subflow->backup].ratio) {
1481 send_info[subflow->backup].ssk = ssk;
1482 send_info[subflow->backup].ratio = ratio;
1483 }
1484 }
Paolo Abeni33d41c92021-08-13 15:15:41 -07001485 __mptcp_set_timeout(sk, tout);
Paolo Abenid5f49192020-09-14 10:01:17 +02001486
Paolo Abenid5f49192020-09-14 10:01:17 +02001487 /* pick the best backup if no other subflow is active */
1488 if (!nr_active)
1489 send_info[0].ssk = send_info[1].ssk;
1490
1491 if (send_info[0].ssk) {
1492 msk->last_snd = send_info[0].ssk;
1493 msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE,
Paolo Abeniec369c32021-01-20 15:39:12 +01001494 tcp_sk(msk->last_snd)->snd_wnd);
Paolo Abenid5f49192020-09-14 10:01:17 +02001495 return msk->last_snd;
1496 }
Paolo Abeni5cf92bb2021-01-20 15:39:11 +01001497
Paolo Abenid5f49192020-09-14 10:01:17 +02001498 return NULL;
Peter Krystadf2962342020-03-27 14:48:39 -07001499}
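/* Illustrative example of the ratio metric above: it approximates the
 * time each subflow needs to drain its already-queued data,
 *
 *	ratio = (wmem_queued << 32) / pacing_rate
 *
 * so with ssk A at 64 KB queued / 1 MB/s pace (~66 ms to drain) and
 * ssk B at 16 KB queued / 125 KB/s pace (~131 ms to drain), A wins
 * despite having more bytes queued. The << 32 only adds fixed-point
 * precision and cancels out in the comparison.
 */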
1500
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001501static void mptcp_push_release(struct sock *sk, struct sock *ssk,
1502 struct mptcp_sendmsg_info *info)
1503{
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001504 tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
1505 release_sock(ssk);
1506}
1507
Paolo Abeniff5a0b42021-08-13 15:15:45 -07001508void __mptcp_push_pending(struct sock *sk, unsigned int flags)
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001509{
1510 struct sock *prev_ssk = NULL, *ssk = NULL;
1511 struct mptcp_sock *msk = mptcp_sk(sk);
1512 struct mptcp_sendmsg_info info = {
1513 .flags = flags,
1514 };
1515 struct mptcp_data_frag *dfrag;
1516 int len, copied = 0;
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001517
1518 while ((dfrag = mptcp_send_head(sk))) {
1519 info.sent = dfrag->already_sent;
1520 info.limit = dfrag->data_len;
1521 len = dfrag->data_len - dfrag->already_sent;
1522 while (len > 0) {
1523 int ret = 0;
1524
1525 prev_ssk = ssk;
Florian Westphal78962482021-04-15 16:44:53 -07001526 mptcp_flush_join_list(msk);
Paolo Abeni5cf92bb2021-01-20 15:39:11 +01001527 ssk = mptcp_subflow_get_send(msk);
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001528
Paolo Abeni9758f402021-08-26 17:44:54 -07001529			/* First check: if the ssk has changed since
1530 * the last round, release prev_ssk
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001531 */
1532 if (ssk != prev_ssk && prev_ssk)
1533 mptcp_push_release(sk, prev_ssk, &info);
1534 if (!ssk)
1535 goto out;
1536
Paolo Abeni9758f402021-08-26 17:44:54 -07001537 /* Need to lock the new subflow only if different
1538 * from the previous one, otherwise we are still
1539			 * holding the relevant lock
1540 */
1541 if (ssk != prev_ssk)
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001542 lock_sock(ssk);
1543
1544 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
1545 if (ret <= 0) {
1546 mptcp_push_release(sk, ssk, &info);
1547 goto out;
1548 }
1549
1550 info.sent += ret;
1551 dfrag->already_sent += ret;
1552 msk->snd_nxt += ret;
1553 msk->snd_burst -= ret;
Paolo Abeni724cfd22020-11-27 11:10:25 +01001554 msk->tx_pending_data -= ret;
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001555 copied += ret;
1556 len -= ret;
1557 }
1558 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
1559 }
1560
1561	/* at this point we still hold the socket lock for the last subflow used */
1562 if (ssk)
1563 mptcp_push_release(sk, ssk, &info);
1564
1565out:
Paolo Abeni33d41c92021-08-13 15:15:41 -07001566 /* ensure the rtx timer is running */
1567 if (!mptcp_timer_pending(sk))
1568 mptcp_reset_timer(sk);
1569 if (copied)
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001570 __mptcp_check_send_data_fin(sk);
1571}
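/* Locking sketch for the loop above (informal summary): the ssk socket
 * lock is kept across iterations while the scheduler keeps returning
 * the same subflow, and is dropped/re-acquired only when the pick
 * changes; the final release happens via mptcp_push_release() after
 * the loop.
 */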
1572
Paolo Abeni6e628cd2020-11-27 11:10:27 +01001573static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
1574{
1575 struct mptcp_sock *msk = mptcp_sk(sk);
Paolo Abeni1094c6f2021-09-01 10:15:36 -07001576 struct mptcp_sendmsg_info info = {
1577 .data_lock_held = true,
1578 };
Paolo Abeni6e628cd2020-11-27 11:10:27 +01001579 struct mptcp_data_frag *dfrag;
Paolo Abenib19bc292021-01-20 15:39:14 +01001580 struct sock *xmit_ssk;
Paolo Abeni6e628cd2020-11-27 11:10:27 +01001581 int len, copied = 0;
Paolo Abenib19bc292021-01-20 15:39:14 +01001582 bool first = true;
Paolo Abeni6e628cd2020-11-27 11:10:27 +01001583
1584 info.flags = 0;
1585 while ((dfrag = mptcp_send_head(sk))) {
1586 info.sent = dfrag->already_sent;
1587 info.limit = dfrag->data_len;
1588 len = dfrag->data_len - dfrag->already_sent;
1589 while (len > 0) {
1590 int ret = 0;
1591
Paolo Abenib19bc292021-01-20 15:39:14 +01001592 /* the caller already invoked the packet scheduler,
1593			 * check whether a different subflow should be used only after
1594 * spooling the first chunk of data
1595 */
1596 xmit_ssk = first ? ssk : mptcp_subflow_get_send(mptcp_sk(sk));
1597 if (!xmit_ssk)
1598 goto out;
1599 if (xmit_ssk != ssk) {
1600 mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
1601 goto out;
1602 }
1603
Paolo Abeni6e628cd2020-11-27 11:10:27 +01001604 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
1605 if (ret <= 0)
1606 goto out;
1607
1608 info.sent += ret;
1609 dfrag->already_sent += ret;
1610 msk->snd_nxt += ret;
1611 msk->snd_burst -= ret;
1612 msk->tx_pending_data -= ret;
1613 copied += ret;
1614 len -= ret;
Paolo Abenib19bc292021-01-20 15:39:14 +01001615 first = false;
Paolo Abeni6e628cd2020-11-27 11:10:27 +01001616 }
1617 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
1618 }
1619
1620out:
1621 /* __mptcp_alloc_tx_skb could have released some wmem and we are
1622 * not going to flush it via release_sock()
1623 */
1624 __mptcp_update_wmem(sk);
1625 if (copied) {
Paolo Abeni6e628cd2020-11-27 11:10:27 +01001626 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
1627 info.size_goal);
Paolo Abenid09d8182021-02-11 15:30:42 -08001628 if (!mptcp_timer_pending(sk))
1629 mptcp_reset_timer(sk);
1630
Paolo Abeni6e628cd2020-11-27 11:10:27 +01001631 if (msk->snd_data_fin_enable &&
1632 msk->snd_nxt + 1 == msk->write_seq)
1633 mptcp_schedule_work(sk);
1634 }
1635}
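/* Informal note on the variant above: it runs with the msk data lock
 * held, so when the scheduler picks a subflow other than the one we
 * already own, the work is handed off via mptcp_subflow_delegate()
 * (processed later in NAPI context) instead of taking a second
 * subflow lock here.
 */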
1636
Paolo Abeni5cf92bb2021-01-20 15:39:11 +01001637static void mptcp_set_nospace(struct sock *sk)
1638{
1639 /* enable autotune */
1640 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1641
1642 /* will be cleared on avail space */
1643 set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
1644}
1645
Mat Martineauf870fa02020-01-21 16:56:15 -08001646static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1647{
1648 struct mptcp_sock *msk = mptcp_sk(sk);
Florian Westphal17091702020-05-16 10:46:21 +02001649 struct page_frag *pfrag;
Mat Martineau6d0060f2020-01-21 16:56:23 -08001650 size_t copied = 0;
Paolo Abenicaf971d2020-11-16 10:48:06 +01001651 int ret = 0;
Mat Martineau6d0060f2020-01-21 16:56:23 -08001652 long timeo;
Mat Martineauf870fa02020-01-21 16:56:15 -08001653
Paolo Abeni987858e2021-04-23 11:17:07 -07001654 /* we don't support FASTOPEN yet */
1655 if (msg->msg_flags & MSG_FASTOPEN)
Mat Martineauf870fa02020-01-21 16:56:15 -08001656 return -EOPNOTSUPP;
1657
Paolo Abeni987858e2021-04-23 11:17:07 -07001658 /* silently ignore everything else */
1659 msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL;
1660
Davide Carattie7579d52020-12-21 22:07:25 +01001661 mptcp_lock_sock(sk, __mptcp_wmem_reserve(sk, min_t(size_t, 1 << 20, len)));
Mat Martineau1954b862020-02-28 15:47:39 -08001662
1663 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1664
1665 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
1666 ret = sk_stream_wait_connect(sk, &timeo);
1667 if (ret)
1668 goto out;
1669 }
1670
Florian Westphal17091702020-05-16 10:46:21 +02001671 pfrag = sk_page_frag(sk);
Paolo Abeni18b683b2020-03-27 14:48:43 -07001672
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001673 while (msg_data_left(msg)) {
Paolo Abeni724cfd22020-11-27 11:10:25 +01001674 int total_ts, frag_truesize = 0;
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001675 struct mptcp_data_frag *dfrag;
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001676 bool dfrag_collapsed;
1677 size_t psize, offset;
Mat Martineau57baaf22020-07-28 15:12:00 -07001678
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001679 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
1680 ret = -EPIPE;
1681 goto out;
Florian Westphalfb529e62020-05-16 10:46:18 +02001682 }
1683
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001684 /* reuse tail pfrag, if possible, or carve a new one from the
1685 * page allocator
1686 */
1687 dfrag = mptcp_pending_tail(sk);
1688 dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
1689 if (!dfrag_collapsed) {
Paolo Abeni6e628cd2020-11-27 11:10:27 +01001690 if (!sk_stream_memory_free(sk))
1691 goto wait_for_memory;
1692
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001693 if (!mptcp_page_frag_refill(sk, pfrag))
1694 goto wait_for_memory;
1695
1696 dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset);
1697 frag_truesize = dfrag->overhead;
1698 }
1699
1700		/* we do not bound vs wspace, so that a single packet can always
1701		 * be sent; memory accounting will prevent excessive memory usage
1702		 * anyway
1703 */
1704 offset = dfrag->offset + dfrag->data_len;
1705 psize = pfrag->size - offset;
1706 psize = min_t(size_t, psize, msg_data_left(msg));
Paolo Abeni724cfd22020-11-27 11:10:25 +01001707 total_ts = psize + frag_truesize;
Paolo Abeni8ce568e2021-06-21 15:54:33 -07001708
1709 if (!mptcp_wmem_alloc(sk, total_ts))
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001710 goto wait_for_memory;
1711
1712 if (copy_page_from_iter(dfrag->page, offset, psize,
1713 &msg->msg_iter) != psize) {
Paolo Abeni87952602020-11-27 11:10:24 +01001714 mptcp_wmem_uncharge(sk, psize + frag_truesize);
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001715 ret = -EFAULT;
1716 goto out;
1717 }
1718
1719 /* data successfully copied into the write queue */
1720 copied += psize;
1721 dfrag->data_len += psize;
1722 frag_truesize += psize;
1723 pfrag->offset += frag_truesize;
1724 WRITE_ONCE(msk->write_seq, msk->write_seq + psize);
Paolo Abeni13e16032020-12-16 12:48:35 +01001725 msk->tx_pending_data += psize;
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001726
1727 /* charge data on mptcp pending queue to the msk socket
1728 * Note: we charge such data both to sk and ssk
1729 */
1730 sk_wmem_queued_add(sk, frag_truesize);
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001731 if (!dfrag_collapsed) {
1732 get_page(dfrag->page);
1733 list_add_tail(&dfrag->list, &msk->rtx_queue);
1734 if (!msk->first_pending)
1735 WRITE_ONCE(msk->first_pending, dfrag);
1736 }
Geliang Tange4b61352021-04-16 15:38:02 -07001737 pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001738 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
1739 !dfrag_collapsed);
1740
Paolo Abenid9ca1de2020-11-16 10:48:10 +01001741 continue;
1742
1743wait_for_memory:
Paolo Abeni5cf92bb2021-01-20 15:39:11 +01001744 mptcp_set_nospace(sk);
Paolo Abenic2e60482021-03-04 13:32:14 -08001745 __mptcp_push_pending(sk, msg->msg_flags);
Peter Krystadf2962342020-03-27 14:48:39 -07001746 ret = sk_stream_wait_memory(sk, &timeo);
1747 if (ret)
1748 goto out;
Peter Krystadcec37a62020-01-21 16:56:18 -08001749 }
1750
Paolo Abeni13e16032020-12-16 12:48:35 +01001751 if (copied)
Paolo Abenic2e60482021-03-04 13:32:14 -08001752 __mptcp_push_pending(sk, msg->msg_flags);
Paolo Abenida51aef2020-09-14 10:01:10 +02001753
Mat Martineau1954b862020-02-28 15:47:39 -08001754out:
Peter Krystadcec37a62020-01-21 16:56:18 -08001755 release_sock(sk);
Paolo Abeni8555c6b2020-08-03 18:40:39 +02001756 return copied ? : ret;
Mat Martineauf870fa02020-01-21 16:56:15 -08001757}
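/* For context, a minimal userspace counterpart (illustrative only):
 * everything above services a plain send()/sendmsg() on a socket
 * created with the MPTCP protocol, e.g.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	send(fd, buf, len, 0);
 */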
1758
Florian Westphal6771bfd2020-02-26 10:14:48 +01001759static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
1760 struct msghdr *msg,
Florian Westphalb7f653b2021-06-03 16:24:32 -07001761 size_t len, int flags,
1762 struct scm_timestamping_internal *tss,
1763 int *cmsg_flags)
Florian Westphal6771bfd2020-02-26 10:14:48 +01001764{
Yonglong Lica4fb8922021-04-23 11:17:08 -07001765 struct sk_buff *skb, *tmp;
Florian Westphal6771bfd2020-02-26 10:14:48 +01001766 int copied = 0;
1767
Yonglong Lica4fb8922021-04-23 11:17:08 -07001768 skb_queue_walk_safe(&msk->receive_queue, skb, tmp) {
Florian Westphal6771bfd2020-02-26 10:14:48 +01001769 u32 offset = MPTCP_SKB_CB(skb)->offset;
1770 u32 data_len = skb->len - offset;
1771 u32 count = min_t(size_t, len - copied, data_len);
1772 int err;
1773
Paolo Abenid9760922021-04-23 11:17:06 -07001774 if (!(flags & MSG_TRUNC)) {
1775 err = skb_copy_datagram_msg(skb, offset, msg, count);
1776 if (unlikely(err < 0)) {
1777 if (!copied)
1778 return err;
1779 break;
1780 }
Florian Westphal6771bfd2020-02-26 10:14:48 +01001781 }
1782
Florian Westphalb7f653b2021-06-03 16:24:32 -07001783 if (MPTCP_SKB_CB(skb)->has_rxtstamp) {
1784 tcp_update_recv_tstamps(skb, tss);
1785 *cmsg_flags |= MPTCP_CMSG_TS;
1786 }
1787
Florian Westphal6771bfd2020-02-26 10:14:48 +01001788 copied += count;
1789
1790 if (count < data_len) {
Yonglong Lica4fb8922021-04-23 11:17:08 -07001791 if (!(flags & MSG_PEEK))
1792 MPTCP_SKB_CB(skb)->offset += count;
Florian Westphal6771bfd2020-02-26 10:14:48 +01001793 break;
1794 }
1795
Yonglong Lica4fb8922021-04-23 11:17:08 -07001796 if (!(flags & MSG_PEEK)) {
1797 /* we will bulk release the skb memory later */
1798 skb->destructor = NULL;
Paolo Abenice599c52021-07-09 17:20:51 -07001799 WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize);
Yonglong Lica4fb8922021-04-23 11:17:08 -07001800 __skb_unlink(skb, &msk->receive_queue);
1801 __kfree_skb(skb);
1802 }
Florian Westphal6771bfd2020-02-26 10:14:48 +01001803
1804 if (copied >= len)
1805 break;
1806 }
1807
1808 return copied;
1809}
1810
Florian Westphala6b118f2020-06-30 21:24:45 +02001811/* receive buffer autotuning. See tcp_rcv_space_adjust for more information.
1812 *
1813 * Only difference: use the highest rtt estimate of the subflows in use.
1814 */
1815static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
1816{
1817 struct mptcp_subflow_context *subflow;
1818 struct sock *sk = (struct sock *)msk;
1819 u32 time, advmss = 1;
1820 u64 rtt_us, mstamp;
1821
1822 sock_owned_by_me(sk);
1823
1824 if (copied <= 0)
1825 return;
1826
1827 msk->rcvq_space.copied += copied;
1828
1829 mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
1830 time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);
1831
1832 rtt_us = msk->rcvq_space.rtt_us;
1833 if (rtt_us && time < (rtt_us >> 3))
1834 return;
1835
1836 rtt_us = 0;
1837 mptcp_for_each_subflow(msk, subflow) {
1838 const struct tcp_sock *tp;
1839 u64 sf_rtt_us;
1840 u32 sf_advmss;
1841
1842 tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));
1843
1844 sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
1845 sf_advmss = READ_ONCE(tp->advmss);
1846
1847 rtt_us = max(sf_rtt_us, rtt_us);
1848 advmss = max(sf_advmss, advmss);
1849 }
1850
1851 msk->rcvq_space.rtt_us = rtt_us;
1852 if (time < (rtt_us >> 3) || rtt_us == 0)
1853 return;
1854
1855 if (msk->rcvq_space.copied <= msk->rcvq_space.space)
1856 goto new_measure;
1857
1858 if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
1859 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
1860 int rcvmem, rcvbuf;
1861 u64 rcvwin, grow;
1862
1863 rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;
1864
1865 grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);
1866
1867 do_div(grow, msk->rcvq_space.space);
1868 rcvwin += (grow << 1);
1869
1870 rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER);
1871 while (tcp_win_from_space(sk, rcvmem) < advmss)
1872 rcvmem += 128;
1873
1874 do_div(rcvwin, advmss);
1875 rcvbuf = min_t(u64, rcvwin * rcvmem,
1876 sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
1877
1878 if (rcvbuf > sk->sk_rcvbuf) {
1879 u32 window_clamp;
1880
1881 window_clamp = tcp_win_from_space(sk, rcvbuf);
1882 WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
1883
1884 /* Make subflows follow along. If we do not do this, we
1885 * get drops at subflow level if skbs can't be moved to
1886 * the mptcp rx queue fast enough (announced rcv_win can
1887 * exceed ssk->sk_rcvbuf).
1888 */
1889 mptcp_for_each_subflow(msk, subflow) {
1890 struct sock *ssk;
Paolo Abenic76c6952020-09-14 10:01:18 +02001891 bool slow;
Florian Westphala6b118f2020-06-30 21:24:45 +02001892
1893 ssk = mptcp_subflow_tcp_sock(subflow);
Paolo Abenic76c6952020-09-14 10:01:18 +02001894 slow = lock_sock_fast(ssk);
Florian Westphala6b118f2020-06-30 21:24:45 +02001895 WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
1896 tcp_sk(ssk)->window_clamp = window_clamp;
Paolo Abenic76c6952020-09-14 10:01:18 +02001897 tcp_cleanup_rbuf(ssk, 1);
1898 unlock_sock_fast(ssk, slow);
Florian Westphala6b118f2020-06-30 21:24:45 +02001899 }
1900 }
1901 }
1902
1903 msk->rcvq_space.space = msk->rcvq_space.copied;
1904new_measure:
1905 msk->rcvq_space.copied = 0;
1906 msk->rcvq_space.time = mstamp;
1907}
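/* Worked example for the autotune math above (illustrative numbers):
 * with advmss = 1460, copied = 64 KB in the last measurement window
 * and a previous space of 32 KB,
 *
 *	rcvwin  = 2 * 65536 + 16 * 1460           = 154432
 *	grow    = 154432 * (65536 - 32768) / 32768 = 154432
 *	rcvwin += 2 * grow                         = 463296
 *
 * which is then converted to an skb-truesize based rcvbuf and clamped
 * to tcp_rmem[2].
 */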
1908
Paolo Abeni87952602020-11-27 11:10:24 +01001909static void __mptcp_update_rmem(struct sock *sk)
1910{
1911 struct mptcp_sock *msk = mptcp_sk(sk);
1912
1913 if (!msk->rmem_released)
1914 return;
1915
1916 atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc);
1917 sk_mem_uncharge(sk, msk->rmem_released);
Paolo Abenice599c52021-07-09 17:20:51 -07001918 WRITE_ONCE(msk->rmem_released, 0);
Paolo Abeni87952602020-11-27 11:10:24 +01001919}
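/* Note on the scheme above: skbs dequeued in __mptcp_recvmsg_mskq()
 * have their destructor cleared and only account into rmem_released,
 * so the sk_rmem_alloc/sk_mem uncharge is batched here rather than
 * paid per skb.
 */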
1920
1921static void __mptcp_splice_receive_queue(struct sock *sk)
1922{
1923 struct mptcp_sock *msk = mptcp_sk(sk);
1924
1925 skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue);
1926}
1927
Paolo Abenie3859602021-02-11 15:30:41 -08001928static bool __mptcp_move_skbs(struct mptcp_sock *msk)
Florian Westphal6771bfd2020-02-26 10:14:48 +01001929{
Paolo Abeni87952602020-11-27 11:10:24 +01001930 struct sock *sk = (struct sock *)msk;
Florian Westphal6771bfd2020-02-26 10:14:48 +01001931 unsigned int moved = 0;
Paolo Abeni87952602020-11-27 11:10:24 +01001932 bool ret, done;
Paolo Abenid5f49192020-09-14 10:01:17 +02001933
Florian Westphal78962482021-04-15 16:44:53 -07001934 mptcp_flush_join_list(msk);
Florian Westphal6771bfd2020-02-26 10:14:48 +01001935 do {
1936 struct sock *ssk = mptcp_subflow_recv_lookup(msk);
Florian Westphal65f49fe72020-11-03 11:05:04 -08001937 bool slowpath;
Florian Westphal6771bfd2020-02-26 10:14:48 +01001938
Paolo Abeni87952602020-11-27 11:10:24 +01001939 /* we can have data pending in the subflows only if the msk
1940 * receive buffer was full at subflow_data_ready() time,
1941		 * which is an unlikely slow path.
1942 */
1943 if (likely(!ssk))
Florian Westphal6771bfd2020-02-26 10:14:48 +01001944 break;
1945
Florian Westphal65f49fe72020-11-03 11:05:04 -08001946 slowpath = lock_sock_fast(ssk);
Paolo Abeni87952602020-11-27 11:10:24 +01001947 mptcp_data_lock(sk);
Paolo Abenie3859602021-02-11 15:30:41 -08001948 __mptcp_update_rmem(sk);
Florian Westphal6771bfd2020-02-26 10:14:48 +01001949 done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
Paolo Abeni87952602020-11-27 11:10:24 +01001950 mptcp_data_unlock(sk);
Paolo Abeni499ada52021-06-10 15:59:44 -07001951
1952 if (unlikely(ssk->sk_err))
1953 __mptcp_error_report(sk);
Florian Westphal65f49fe72020-11-03 11:05:04 -08001954 unlock_sock_fast(ssk, slowpath);
Florian Westphal6771bfd2020-02-26 10:14:48 +01001955 } while (!done);
1956
Paolo Abeni87952602020-11-27 11:10:24 +01001957 /* acquire the data lock only if some input data is pending */
1958 ret = moved > 0;
1959 if (!RB_EMPTY_ROOT(&msk->out_of_order_queue) ||
1960 !skb_queue_empty_lockless(&sk->sk_receive_queue)) {
1961 mptcp_data_lock(sk);
1962 __mptcp_update_rmem(sk);
1963 ret |= __mptcp_ofo_queue(msk);
1964 __mptcp_splice_receive_queue(sk);
1965 mptcp_data_unlock(sk);
Paolo Abeniab174ad2020-09-14 10:01:12 +02001966 }
Paolo Abeni87952602020-11-27 11:10:24 +01001967 if (ret)
1968 mptcp_check_data_fin((struct sock *)msk);
1969 return !skb_queue_empty(&msk->receive_queue);
Florian Westphal6771bfd2020-02-26 10:14:48 +01001970}
1971
Mat Martineauf870fa02020-01-21 16:56:15 -08001972static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
1973 int nonblock, int flags, int *addr_len)
1974{
1975 struct mptcp_sock *msk = mptcp_sk(sk);
Florian Westphalb7f653b2021-06-03 16:24:32 -07001976 struct scm_timestamping_internal tss;
1977 int copied = 0, cmsg_flags = 0;
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001978 int target;
1979 long timeo;
Mat Martineauf870fa02020-01-21 16:56:15 -08001980
Paolo Abenicb9d80f2021-04-23 11:17:05 -07001981 /* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */
1982 if (unlikely(flags & MSG_ERRQUEUE))
1983 return inet_recv_error(sk, msg, len, addr_len);
1984
Paolo Abeni87952602020-11-27 11:10:24 +01001985 mptcp_lock_sock(sk, __mptcp_splice_receive_queue(sk));
Paolo Abenifd897672020-11-24 22:51:24 +01001986 if (unlikely(sk->sk_state == TCP_LISTEN)) {
1987 copied = -ENOTCONN;
1988 goto out_err;
1989 }
1990
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001991 timeo = sock_rcvtimeo(sk, nonblock);
1992
1993 len = min_t(size_t, len, INT_MAX);
1994 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1995
Eric Dumazet05e3ece2020-12-02 09:16:57 -08001996 while (copied < len) {
Paolo Abenie3859602021-02-11 15:30:41 -08001997 int bytes_read;
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08001998
Florian Westphalb7f653b2021-06-03 16:24:32 -07001999 bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags);
Florian Westphal6771bfd2020-02-26 10:14:48 +01002000 if (unlikely(bytes_read < 0)) {
2001 if (!copied)
2002 copied = bytes_read;
2003 goto out_err;
2004 }
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08002005
Florian Westphal6771bfd2020-02-26 10:14:48 +01002006 copied += bytes_read;
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08002007
Paolo Abeniea4ca582020-11-19 11:46:03 -08002008 /* be sure to advertise window change */
Paolo Abenie3859602021-02-11 15:30:41 -08002009 mptcp_cleanup_rbuf(msk);
2010
2011 if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
2012 continue;
Paolo Abeniea4ca582020-11-19 11:46:03 -08002013
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08002014 /* only the master socket status is relevant here. The exit
2015		 * conditions closely mirror tcp_recvmsg()
2016 */
2017 if (copied >= target)
2018 break;
2019
2020 if (copied) {
2021 if (sk->sk_err ||
2022 sk->sk_state == TCP_CLOSE ||
2023 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2024 !timeo ||
2025 signal_pending(current))
2026 break;
2027 } else {
2028 if (sk->sk_err) {
2029 copied = sock_error(sk);
2030 break;
2031 }
2032
Paolo Abeni59698562020-06-10 10:47:41 +02002033 if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
2034 mptcp_check_for_eof(msk);
2035
Paolo Abeni87952602020-11-27 11:10:24 +01002036 if (sk->sk_shutdown & RCV_SHUTDOWN) {
2037			/* race breaker: the shutdown could have arrived after the
2038 * previous receive queue check
2039 */
Paolo Abenie3859602021-02-11 15:30:41 -08002040 if (__mptcp_move_skbs(msk))
Paolo Abeni87952602020-11-27 11:10:24 +01002041 continue;
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08002042 break;
Paolo Abeni87952602020-11-27 11:10:24 +01002043 }
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08002044
2045 if (sk->sk_state == TCP_CLOSE) {
2046 copied = -ENOTCONN;
2047 break;
2048 }
2049
2050 if (!timeo) {
2051 copied = -EAGAIN;
2052 break;
2053 }
2054
2055 if (signal_pending(current)) {
2056 copied = sock_intr_errno(timeo);
2057 break;
2058 }
2059 }
2060
2061 pr_debug("block timeout %ld", timeo);
Paolo Abeni612f71d2021-10-07 15:05:00 -07002062 sk_wait_data(sk, &timeo, NULL);
Paolo Abeni7a6a6cb2020-01-21 16:56:26 -08002063 }
Paolo Abeni3c90e372021-06-21 15:54:35 -07002064
Florian Westphal6771bfd2020-02-26 10:14:48 +01002065out_err:
Florian Westphalb7f653b2021-06-03 16:24:32 -07002066 if (cmsg_flags && copied >= 0) {
2067 if (cmsg_flags & MPTCP_CMSG_TS)
2068 tcp_recv_timestamp(msg, sk, &tss);
2069 }
2070
Paolo Abeni612f71d2021-10-07 15:05:00 -07002071 pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
2072 msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
2073 skb_queue_empty(&msk->receive_queue), copied);
Yonglong Lica4fb8922021-04-23 11:17:08 -07002074 if (!(flags & MSG_PEEK))
2075 mptcp_rcv_space_adjust(msk, copied);
Florian Westphala6b118f2020-06-30 21:24:45 +02002076
Peter Krystadcec37a62020-01-21 16:56:18 -08002077 release_sock(sk);
Peter Krystadcec37a62020-01-21 16:56:18 -08002078 return copied;
2079}
2080
Paolo Abenib51f9b82020-03-27 14:48:44 -07002081static void mptcp_retransmit_timer(struct timer_list *t)
2082{
2083 struct inet_connection_sock *icsk = from_timer(icsk, t,
2084 icsk_retransmit_timer);
2085 struct sock *sk = &icsk->icsk_inet.sk;
Paolo Abeni2d6f5a22021-03-26 11:26:30 -07002086 struct mptcp_sock *msk = mptcp_sk(sk);
Paolo Abenib51f9b82020-03-27 14:48:44 -07002087
2088 bh_lock_sock(sk);
2089 if (!sock_owned_by_user(sk)) {
Paolo Abeni2d6f5a22021-03-26 11:26:30 -07002090 /* we need a process context to retransmit */
2091 if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags))
2092 mptcp_schedule_work(sk);
Paolo Abenib51f9b82020-03-27 14:48:44 -07002093 } else {
2094 /* delegate our work to tcp_release_cb() */
Paolo Abeni2d6f5a22021-03-26 11:26:30 -07002095 set_bit(MPTCP_RETRANSMIT, &msk->flags);
Paolo Abenib51f9b82020-03-27 14:48:44 -07002096 }
2097 bh_unlock_sock(sk);
2098 sock_put(sk);
2099}
2100
Paolo Abenie16163b2020-11-16 10:48:09 +01002101static void mptcp_timeout_timer(struct timer_list *t)
2102{
2103 struct sock *sk = from_timer(sk, t, sk_timer);
2104
2105 mptcp_schedule_work(sk);
Florian Westphalb6d69fc2020-11-24 17:24:46 +01002106 sock_put(sk);
Paolo Abenie16163b2020-11-16 10:48:09 +01002107}
2108
Paolo Abeni3b1d6212020-03-27 14:48:48 -07002109/* Find an idle subflow. Return NULL if there is unacked data at tcp
2110 * level.
2111 *
2112 * A backup subflow is returned only if that is the only kind available.
2113 */
Paolo Abeniff5a0b42021-08-13 15:15:45 -07002114static struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
Paolo Abeni3b1d6212020-03-27 14:48:48 -07002115{
Paolo Abeni71b7dec2021-08-13 15:15:42 -07002116 struct sock *backup = NULL, *pick = NULL;
Paolo Abeni3b1d6212020-03-27 14:48:48 -07002117 struct mptcp_subflow_context *subflow;
Paolo Abeni71b7dec2021-08-13 15:15:42 -07002118 int min_stale_count = INT_MAX;
Paolo Abeni3b1d6212020-03-27 14:48:48 -07002119
2120 sock_owned_by_me((const struct sock *)msk);
2121
Paolo Abenid5f49192020-09-14 10:01:17 +02002122 if (__mptcp_check_fallback(msk))
Paolo Abenid9ca1de2020-11-16 10:48:10 +01002123 return NULL;
Paolo Abenid5f49192020-09-14 10:01:17 +02002124
Paolo Abeni3b1d6212020-03-27 14:48:48 -07002125 mptcp_for_each_subflow(msk, subflow) {
2126 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2127
Paolo Abeniff5a0b42021-08-13 15:15:45 -07002128 if (!__mptcp_subflow_active(subflow))
Paolo Abenid5f49192020-09-14 10:01:17 +02002129 continue;
2130
Paolo Abeni71b7dec2021-08-13 15:15:42 -07002131 /* still data outstanding at TCP level? skip this */
2132 if (!tcp_rtx_and_write_queues_empty(ssk)) {
2133 mptcp_pm_subflow_chk_stale(msk, ssk);
2134 min_stale_count = min_t(int, min_stale_count, subflow->stale_count);
2135 continue;
Florian Westphal860975c2020-11-19 11:45:56 -08002136 }
Paolo Abeni3b1d6212020-03-27 14:48:48 -07002137
2138 if (subflow->backup) {
2139 if (!backup)
2140 backup = ssk;
2141 continue;
2142 }
2143
Paolo Abeni71b7dec2021-08-13 15:15:42 -07002144 if (!pick)
2145 pick = ssk;
Paolo Abeni3b1d6212020-03-27 14:48:48 -07002146 }
2147
Paolo Abeni71b7dec2021-08-13 15:15:42 -07002148 if (pick)
2149 return pick;
2150
2151	/* use the backup only if there is no progress anywhere */
2152 return min_stale_count > 1 ? backup : NULL;
Paolo Abeni3b1d6212020-03-27 14:48:48 -07002153}
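/* Informal reading of the fallback above: the backup is returned only
 * when no fresh non-backup subflow was picked and every subflow that
 * still has tcp-level data outstanding has been seen stale more than
 * once (min_stale_count > 1), i.e. nothing else is making progress.
 */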
2154
Florian Westphal17aee052021-03-04 13:32:11 -08002155static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk)
2156{
2157 if (msk->subflow) {
2158 iput(SOCK_INODE(msk->subflow));
2159 msk->subflow = NULL;
2160 }
2161}
2162
Paolo Abeni1e1d9d62021-08-13 15:15:43 -07002163bool __mptcp_retransmit_pending_data(struct sock *sk)
2164{
2165 struct mptcp_data_frag *cur, *rtx_head;
2166 struct mptcp_sock *msk = mptcp_sk(sk);
2167
2168 if (__mptcp_check_fallback(mptcp_sk(sk)))
2169 return false;
2170
2171 if (tcp_rtx_and_write_queues_empty(sk))
2172 return false;
2173
2174 /* the closing socket has some data untransmitted and/or unacked:
2175	 * some data in the mptcp rtx queue has not really been transmitted yet.
2176	 * keep it simple and re-inject the whole mptcp-level rtx queue
2177 */
2178 mptcp_data_lock(sk);
2179 __mptcp_clean_una_wakeup(sk);
2180 rtx_head = mptcp_rtx_head(sk);
2181 if (!rtx_head) {
2182 mptcp_data_unlock(sk);
2183 return false;
2184 }
2185
2186	/* will accept acks for re-injected data before re-sending it */
2187 if (!msk->recovery || after64(msk->snd_nxt, msk->recovery_snd_nxt))
2188 msk->recovery_snd_nxt = msk->snd_nxt;
2189 msk->recovery = true;
2190 mptcp_data_unlock(sk);
2191
2192 msk->first_pending = rtx_head;
2193 msk->tx_pending_data += msk->snd_nxt - rtx_head->data_seq;
2194 msk->snd_nxt = rtx_head->data_seq;
2195 msk->snd_burst = 0;
2196
2197 /* be sure to clear the "sent status" on all re-injected fragments */
2198 list_for_each_entry(cur, &msk->rtx_queue, list) {
2199 if (!cur->already_sent)
2200 break;
2201 cur->already_sent = 0;
2202 }
2203
2204 return true;
2205}
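/* Sketch of the recovery semantics above (informal): recovery_snd_nxt
 * records how far the old transmission had gone; while msk->recovery
 * is set, acks up to that point are accepted for the re-injected data
 * even though snd_nxt was rewound to the rtx queue head.
 */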
2206
Peter Krystadcec37a62020-01-21 16:56:18 -08002207/* subflow sockets can be either outgoing (connect) or incoming
2208 * (accept).
2209 *
2210 * Outgoing subflows use in-kernel sockets.
2211 * Incoming subflows do not have their own 'struct socket' allocated,
2212 * so we need to use tcp_close() after detaching them from the mptcp
2213 * parent socket.
2214 */
Florian Westphala141e022021-02-12 15:59:55 -08002215static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2216 struct mptcp_subflow_context *subflow)
Peter Krystadcec37a62020-01-21 16:56:18 -08002217{
Florian Westphale0be4932021-03-04 13:32:08 -08002218 struct mptcp_sock *msk = mptcp_sk(sk);
Paolo Abeni1e1d9d62021-08-13 15:15:43 -07002219 bool need_push;
Florian Westphale0be4932021-03-04 13:32:08 -08002220
Peter Krystadcec37a62020-01-21 16:56:18 -08002221 list_del(&subflow->node);
2222
Paolo Abeni3f8b2662020-12-16 12:48:33 +01002223 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
Paolo Abenie16163b2020-11-16 10:48:09 +01002224
2225 /* if we are invoked by the msk cleanup code, the subflow is
2226 * already orphaned
2227 */
Paolo Abeni866f26f2021-01-20 15:39:10 +01002228 if (ssk->sk_socket)
Paolo Abenie16163b2020-11-16 10:48:09 +01002229 sock_orphan(ssk);
Paolo Abenie16163b2020-11-16 10:48:09 +01002230
Paolo Abeni1e1d9d62021-08-13 15:15:43 -07002231 need_push = __mptcp_retransmit_pending_data(sk);
Paolo Abenid7b1bfd2020-12-09 12:03:31 +01002232 subflow->disposable = 1;
2233
Paolo Abenie16163b2020-11-16 10:48:09 +01002234	/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops:
2235	 * the ssk has already been destroyed and we just need to release the
2236	 * reference owned by msk;
2237 */
2238 if (!inet_csk(ssk)->icsk_ulp_ops) {
2239 kfree_rcu(subflow, rcu);
2240 } else {
Paolo Abenid7b1bfd2020-12-09 12:03:31 +01002241 /* otherwise tcp will dispose of the ssk and subflow ctx */
Paolo Abenie16163b2020-11-16 10:48:09 +01002242 __tcp_close(ssk, 0);
2243
2244 /* close acquired an extra ref */
2245 __sock_put(ssk);
2246 }
2247 release_sock(ssk);
Paolo Abenie16163b2020-11-16 10:48:09 +01002248
2249 sock_put(ssk);
Florian Westphale0be4932021-03-04 13:32:08 -08002250
2251 if (ssk == msk->last_snd)
2252 msk->last_snd = NULL;
Florian Westphal17aee052021-03-04 13:32:11 -08002253
Florian Westphalc8fe62f2021-03-04 13:32:12 -08002254 if (ssk == msk->first)
2255 msk->first = NULL;
2256
Florian Westphal17aee052021-03-04 13:32:11 -08002257 if (msk->subflow && ssk == msk->subflow->sk)
2258 mptcp_dispose_initial_subflow(msk);
Paolo Abeni1e1d9d62021-08-13 15:15:43 -07002259
2260 if (need_push)
2261 __mptcp_push_pending(sk, 0);
Mat Martineauf870fa02020-01-21 16:56:15 -08002262}
2263
Florian Westphala141e022021-02-12 15:59:55 -08002264void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2265 struct mptcp_subflow_context *subflow)
2266{
Florian Westphalb911c972021-02-12 16:00:01 -08002267 if (sk->sk_state == TCP_ESTABLISHED)
2268 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
Florian Westphala141e022021-02-12 15:59:55 -08002269 __mptcp_close_ssk(sk, ssk, subflow);
2270}
2271
Paolo Abenidc24f8b2020-02-26 12:19:03 +01002272static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
2273{
2274 return 0;
2275}
2276
Paolo Abeni0e4f35d2020-10-09 19:00:01 +02002277static void __mptcp_close_subflow(struct mptcp_sock *msk)
2278{
2279 struct mptcp_subflow_context *subflow, *tmp;
2280
Florian Westphal3abc05d2021-02-04 15:23:30 -08002281 might_sleep();
2282
Paolo Abeni0e4f35d2020-10-09 19:00:01 +02002283 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
2284 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2285
2286 if (inet_sk_state_load(ssk) != TCP_CLOSE)
2287 continue;
2288
Florian Westphal40947e12021-02-12 15:59:56 -08002289 /* 'subflow_data_ready' will re-sched once rx queue is empty */
2290 if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
2291 continue;
2292
Florian Westphala141e022021-02-12 15:59:55 -08002293 mptcp_close_ssk((struct sock *)msk, ssk, subflow);
Paolo Abeni0e4f35d2020-10-09 19:00:01 +02002294 }
2295}
2296
Paolo Abenie16163b2020-11-16 10:48:09 +01002297static bool mptcp_check_close_timeout(const struct sock *sk)
2298{
2299 s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp;
2300 struct mptcp_subflow_context *subflow;
2301
2302 if (delta >= TCP_TIMEWAIT_LEN)
2303 return true;
2304
2305	/* if all subflows are in closed status, don't bother with an additional
2306 * timeout
2307 */
2308 mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
2309 if (inet_sk_state_load(mptcp_subflow_tcp_sock(subflow)) !=
2310 TCP_CLOSE)
2311 return false;
2312 }
2313 return true;
2314}
2315
Florian Westphal50c504a2020-12-10 14:25:04 -08002316static void mptcp_check_fastclose(struct mptcp_sock *msk)
2317{
2318 struct mptcp_subflow_context *subflow, *tmp;
2319 struct sock *sk = &msk->sk.icsk_inet.sk;
2320
2321 if (likely(!READ_ONCE(msk->rcv_fastclose)))
2322 return;
2323
2324 mptcp_token_destroy(msk);
2325
2326 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
2327 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
Paolo Abeni75e908c2021-06-21 15:54:34 -07002328 bool slow;
Florian Westphal50c504a2020-12-10 14:25:04 -08002329
Paolo Abeni75e908c2021-06-21 15:54:34 -07002330 slow = lock_sock_fast(tcp_sk);
Florian Westphal50c504a2020-12-10 14:25:04 -08002331 if (tcp_sk->sk_state != TCP_CLOSE) {
2332 tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
2333 tcp_set_state(tcp_sk, TCP_CLOSE);
2334 }
Paolo Abeni75e908c2021-06-21 15:54:34 -07002335 unlock_sock_fast(tcp_sk, slow);
Florian Westphal50c504a2020-12-10 14:25:04 -08002336 }
2337
2338 inet_sk_state_store(sk, TCP_CLOSE);
2339 sk->sk_shutdown = SHUTDOWN_MASK;
2340 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
Florian Westphal50c504a2020-12-10 14:25:04 -08002341 set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
2342
2343 mptcp_close_wake_up(sk);
2344}
2345
Paolo Abeni2948d0a2021-03-04 13:32:13 -08002346static void __mptcp_retrans(struct sock *sk)
Paolo Abeni80992012020-02-26 10:14:47 +01002347{
Paolo Abeni2948d0a2021-03-04 13:32:13 -08002348 struct mptcp_sock *msk = mptcp_sk(sk);
Paolo Abenicaf971d2020-11-16 10:48:06 +01002349 struct mptcp_sendmsg_info info = {};
Paolo Abeni3b1d6212020-03-27 14:48:48 -07002350 struct mptcp_data_frag *dfrag;
Paolo Abeni3b1d6212020-03-27 14:48:48 -07002351 size_t copied = 0;
Paolo Abeni2948d0a2021-03-04 13:32:13 -08002352 struct sock *ssk;
2353 int ret;
2354
Paolo Abenib5941f02021-05-27 16:31:37 -07002355 mptcp_clean_una_wakeup(sk);
Paolo Abeni2948d0a2021-03-04 13:32:13 -08002356 dfrag = mptcp_rtx_head(sk);
Mat Martineau6477dd32021-04-23 09:40:33 -07002357 if (!dfrag) {
2358 if (mptcp_data_fin_enabled(msk)) {
2359 struct inet_connection_sock *icsk = inet_csk(sk);
2360
2361 icsk->icsk_retransmits++;
2362 mptcp_set_datafin_timeout(sk);
2363 mptcp_send_ack(msk);
2364
2365 goto reset_timer;
2366 }
2367
Paolo Abeni2948d0a2021-03-04 13:32:13 -08002368 return;
Mat Martineau6477dd32021-04-23 09:40:33 -07002369 }
Paolo Abeni2948d0a2021-03-04 13:32:13 -08002370
2371 ssk = mptcp_subflow_get_retrans(msk);
2372 if (!ssk)
2373 goto reset_timer;
2374
2375 lock_sock(ssk);
2376
2377 /* limit retransmission to the bytes already sent on some subflows */
2378 info.sent = 0;
Paolo Abeni4e148672021-06-17 16:46:17 -07002379 info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : dfrag->already_sent;
2380 while (info.sent < info.limit) {
Paolo Abeni2948d0a2021-03-04 13:32:13 -08002381 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
2382 if (ret <= 0)
2383 break;
2384
2385 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
2386 copied += ret;
2387 info.sent += ret;
2388 }
Paolo Abeni4e148672021-06-17 16:46:17 -07002389 if (copied) {
2390 dfrag->already_sent = max(dfrag->already_sent, info.sent);
Paolo Abeni2948d0a2021-03-04 13:32:13 -08002391 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
2392 info.size_goal);
Paolo Abeni4e148672021-06-17 16:46:17 -07002393 }
Paolo Abeni2948d0a2021-03-04 13:32:13 -08002394
Paolo Abeni2948d0a2021-03-04 13:32:13 -08002395 release_sock(ssk);
2396
2397reset_timer:
2398 if (!mptcp_timer_pending(sk))
2399 mptcp_reset_timer(sk);
2400}
2401
2402static void mptcp_worker(struct work_struct *work)
2403{
2404 struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
2405 struct sock *sk = &msk->sk.icsk_inet.sk;
2406 int state;
Paolo Abeni80992012020-02-26 10:14:47 +01002407
2408 lock_sock(sk);
Paolo Abenie16163b2020-11-16 10:48:09 +01002409 state = sk->sk_state;
2410 if (unlikely(state == TCP_CLOSE))
2411 goto unlock;
2412
Mat Martineau43b54c62020-07-28 15:12:06 -07002413 mptcp_check_data_fin_ack(sk);
Florian Westphal78962482021-04-15 16:44:53 -07002414 mptcp_flush_join_list(msk);
Florian Westphal50c504a2020-12-10 14:25:04 -08002415
2416 mptcp_check_fastclose(msk);
2417
Florian Westphalb4162682020-07-07 14:40:48 +02002418 if (msk->pm.status)
Florian Westphale9801432021-02-12 15:59:54 -08002419 mptcp_pm_nl_work(msk);
Florian Westphalb4162682020-07-07 14:40:48 +02002420
Florian Westphal59832e22020-04-02 13:44:52 +02002421 if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
2422 mptcp_check_for_eof(msk);
2423
Paolo Abeni6e628cd2020-11-27 11:10:27 +01002424 __mptcp_check_send_data_fin(sk);
Mat Martineau43b54c62020-07-28 15:12:06 -07002425 mptcp_check_data_fin(sk);
2426
Paolo Abeni341c6522021-02-19 18:35:37 +01002427	/* There is no point in keeping around an orphaned sk that timed out
2428	 * or is closed, but we need the msk around to reply to incoming DATA_FIN,
2429 * even if it is orphaned and in FIN_WAIT2 state
Paolo Abenie16163b2020-11-16 10:48:09 +01002430 */
2431 if (sock_flag(sk, SOCK_DEAD) &&
Paolo Abeni341c6522021-02-19 18:35:37 +01002432 (mptcp_check_close_timeout(sk) || sk->sk_state == TCP_CLOSE)) {
Paolo Abenie16163b2020-11-16 10:48:09 +01002433 inet_sk_state_store(sk, TCP_CLOSE);
2434 __mptcp_destroy_sock(sk);
2435 goto unlock;
2436 }
2437
Florian Westphalb263b0d2021-02-12 15:59:57 -08002438 if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
2439 __mptcp_close_subflow(msk);
2440
Paolo Abeni2948d0a2021-03-04 13:32:13 -08002441 if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
2442 __mptcp_retrans(sk);
Paolo Abeni3b1d6212020-03-27 14:48:48 -07002443
2444unlock:
Paolo Abeni80992012020-02-26 10:14:47 +01002445 release_sock(sk);
2446 sock_put(sk);
2447}
2448
Matthieu Baerts784325e2020-01-21 16:56:28 -08002449static int __mptcp_init_sock(struct sock *sk)
Mat Martineauf870fa02020-01-21 16:56:15 -08002450{
Peter Krystadcec37a62020-01-21 16:56:18 -08002451 struct mptcp_sock *msk = mptcp_sk(sk);
2452
Peter Krystadec3edaa2020-03-27 14:48:40 -07002453 spin_lock_init(&msk->join_list_lock);
2454
Peter Krystadcec37a62020-01-21 16:56:18 -08002455 INIT_LIST_HEAD(&msk->conn_list);
Peter Krystadec3edaa2020-03-27 14:48:40 -07002456 INIT_LIST_HEAD(&msk->join_list);
Paolo Abeni18b683b2020-03-27 14:48:43 -07002457 INIT_LIST_HEAD(&msk->rtx_queue);
Paolo Abeni80992012020-02-26 10:14:47 +01002458 INIT_WORK(&msk->work, mptcp_worker);
Paolo Abeni87952602020-11-27 11:10:24 +01002459 __skb_queue_head_init(&msk->receive_queue);
Paolo Abeniab174ad2020-09-14 10:01:12 +02002460 msk->out_of_order_queue = RB_ROOT;
Paolo Abenif0e6a4c2020-11-16 10:48:07 +01002461 msk->first_pending = NULL;
Paolo Abenie93da922020-11-27 11:10:23 +01002462 msk->wmem_reserved = 0;
Paolo Abenice599c52021-07-09 17:20:51 -07002463 WRITE_ONCE(msk->rmem_released, 0);
Paolo Abeni724cfd22020-11-27 11:10:25 +01002464 msk->tx_pending_data = 0;
Paolo Abeni33d41c92021-08-13 15:15:41 -07002465 msk->timer_ival = TCP_RTO_MIN;
Peter Krystadcec37a62020-01-21 16:56:18 -08002466
Paolo Abeni8ab183d2020-01-21 16:56:33 -08002467 msk->first = NULL;
Paolo Abenidc24f8b2020-02-26 12:19:03 +01002468 inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
Geliang Tang752e9062021-06-17 16:46:07 -07002469 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
Paolo Abeni1e1d9d62021-08-13 15:15:43 -07002470 msk->recovery = false;
Paolo Abeni8ab183d2020-01-21 16:56:33 -08002471
Peter Krystad1b1c7a02020-03-27 14:48:38 -07002472 mptcp_pm_data_init(msk);
2473
Paolo Abenib51f9b82020-03-27 14:48:44 -07002474 /* re-use the csk retrans timer for MPTCP-level retrans */
2475 timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
Paolo Abenie16163b2020-11-16 10:48:09 +01002476 timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0);
Florian Westphalaa1fbd92021-04-15 16:45:01 -07002477
Mat Martineauf870fa02020-01-21 16:56:15 -08002478 return 0;
2479}
2480
Matthieu Baerts784325e2020-01-21 16:56:28 -08002481static int mptcp_init_sock(struct sock *sk)
2482{
Paolo Abeni20b57592021-05-25 14:23:10 -07002483 struct inet_connection_sock *icsk = inet_csk(sk);
Florian Westphalfc518952020-03-27 14:48:50 -07002484 struct net *net = sock_net(sk);
2485 int ret;
Paolo Abeni18b683b2020-03-27 14:48:43 -07002486
Geliang Tangb6c08382020-09-24 08:29:54 +08002487 ret = __mptcp_init_sock(sk);
2488 if (ret)
2489 return ret;
2490
Florian Westphalfc518952020-03-27 14:48:50 -07002491 if (!mptcp_is_enabled(net))
2492 return -ENOPROTOOPT;
2493
2494 if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
2495 return -ENOMEM;
2496
Paolo Abenifa680182020-06-29 22:26:23 +02002497 ret = __mptcp_socket_create(mptcp_sk(sk));
2498 if (ret)
2499 return ret;
2500
Paolo Abeni20b57592021-05-25 14:23:10 -07002501 /* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
2502 * propagate the correct value
2503 */
2504 tcp_assign_congestion_control(sk);
2505 strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
2506
2507 /* no need to keep a reference to the ops, the name will suffice */
2508 tcp_cleanup_congestion_control(sk);
2509 icsk->icsk_ca_ops = NULL;
2510
Paolo Abenid0272362020-03-27 14:48:45 -07002511 sk_sockets_allocated_inc(sk);
Florian Westphala6b118f2020-06-30 21:24:45 +02002512 sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
Paolo Abenida51aef2020-09-14 10:01:10 +02002513 sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
Paolo Abenid0272362020-03-27 14:48:45 -07002514
Paolo Abeni18b683b2020-03-27 14:48:43 -07002515 return 0;
2516}
2517
2518static void __mptcp_clear_xmit(struct sock *sk)
2519{
2520 struct mptcp_sock *msk = mptcp_sk(sk);
2521 struct mptcp_data_frag *dtmp, *dfrag;
2522
Paolo Abenid9ca1de2020-11-16 10:48:10 +01002523 WRITE_ONCE(msk->first_pending, NULL);
Paolo Abeni18b683b2020-03-27 14:48:43 -07002524 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
Paolo Abenid0272362020-03-27 14:48:45 -07002525 dfrag_clear(sk, dfrag);
Matthieu Baerts784325e2020-01-21 16:56:28 -08002526}
2527
Paolo Abeni80992012020-02-26 10:14:47 +01002528static void mptcp_cancel_work(struct sock *sk)
2529{
2530 struct mptcp_sock *msk = mptcp_sk(sk);
2531
Paolo Abenib2771d22020-11-19 11:45:54 -08002532 if (cancel_work_sync(&msk->work))
Paolo Abenie16163b2020-11-16 10:48:09 +01002533 __sock_put(sk);
Paolo Abeni80992012020-02-26 10:14:47 +01002534}
2535
Geliang Tangd0876b22020-09-24 08:29:49 +08002536void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
Peter Krystad21498492020-01-21 16:56:21 -08002537{
2538 lock_sock(ssk);
2539
2540 switch (ssk->sk_state) {
2541 case TCP_LISTEN:
2542 if (!(how & RCV_SHUTDOWN))
2543 break;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05002544 fallthrough;
Peter Krystad21498492020-01-21 16:56:21 -08002545 case TCP_SYN_SENT:
2546 tcp_disconnect(ssk, O_NONBLOCK);
2547 break;
2548 default:
Mat Martineau43b54c62020-07-28 15:12:06 -07002549 if (__mptcp_check_fallback(mptcp_sk(sk))) {
2550 pr_debug("Fallback");
2551 ssk->sk_shutdown |= how;
2552 tcp_shutdown(ssk, how);
2553 } else {
2554 pr_debug("Sending DATA_FIN on subflow %p", ssk);
Mat Martineau43b54c62020-07-28 15:12:06 -07002555 tcp_send_ack(ssk);
Mat Martineau6477dd32021-04-23 09:40:33 -07002556 if (!mptcp_timer_pending(sk))
2557 mptcp_reset_timer(sk);
Mat Martineau43b54c62020-07-28 15:12:06 -07002558 }
Peter Krystad21498492020-01-21 16:56:21 -08002559 break;
2560 }
2561
Peter Krystad21498492020-01-21 16:56:21 -08002562 release_sock(ssk);
2563}
2564
Mat Martineau6920b852020-07-28 15:12:04 -07002565static const unsigned char new_state[16] = {
2566 /* current state: new state: action: */
2567 [0 /* (Invalid) */] = TCP_CLOSE,
2568 [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2569 [TCP_SYN_SENT] = TCP_CLOSE,
2570 [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2571 [TCP_FIN_WAIT1] = TCP_FIN_WAIT1,
2572 [TCP_FIN_WAIT2] = TCP_FIN_WAIT2,
2573 [TCP_TIME_WAIT] = TCP_CLOSE, /* should not happen ! */
2574 [TCP_CLOSE] = TCP_CLOSE,
2575 [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN,
2576 [TCP_LAST_ACK] = TCP_LAST_ACK,
2577 [TCP_LISTEN] = TCP_CLOSE,
2578 [TCP_CLOSING] = TCP_CLOSING,
2579 [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */
2580};
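/* Example of how the table is consumed (see mptcp_close_state() just
 * below): an ESTABLISHED socket maps to TCP_FIN_WAIT1 | TCP_ACTION_FIN,
 * so the state moves to FIN_WAIT1 and the nonzero TCP_ACTION_FIN bit
 * tells the caller to start the DATA_FIN sequence.
 */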
2581
2582static int mptcp_close_state(struct sock *sk)
2583{
2584 int next = (int)new_state[sk->sk_state];
2585 int ns = next & TCP_STATE_MASK;
2586
2587 inet_sk_state_store(sk, ns);
2588
2589 return next & TCP_ACTION_FIN;
2590}
2591
Paolo Abenie16163b2020-11-16 10:48:09 +01002592static void __mptcp_check_send_data_fin(struct sock *sk)
2593{
2594 struct mptcp_subflow_context *subflow;
2595 struct mptcp_sock *msk = mptcp_sk(sk);
2596
2597 pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
2598 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
2599 msk->snd_nxt, msk->write_seq);
2600
2601	/* we still have data to enqueue on the subflows, or we are not
2602	 * really shutting down: skip this
2603 */
2604 if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq ||
2605 mptcp_send_head(sk))
2606 return;
2607
2608 WRITE_ONCE(msk->snd_nxt, msk->write_seq);
2609
Paolo Abeni26aa2312020-11-19 11:45:55 -08002610	/* a fallback socket will not get data_fin/ack; we can move to the next
2611 * state now
2612 */
2613 if (__mptcp_check_fallback(msk)) {
2614 if ((1 << sk->sk_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) {
2615 inet_sk_state_store(sk, TCP_CLOSE);
2616 mptcp_close_wake_up(sk);
2617 } else if (sk->sk_state == TCP_FIN_WAIT1) {
2618 inet_sk_state_store(sk, TCP_FIN_WAIT2);
2619 }
Paolo Abenie16163b2020-11-16 10:48:09 +01002620 }
2621
Florian Westphal78962482021-04-15 16:44:53 -07002622 mptcp_flush_join_list(msk);
Paolo Abenie16163b2020-11-16 10:48:09 +01002623 mptcp_for_each_subflow(msk, subflow) {
2624 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2625
2626 mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN);
2627 }
2628}
2629
2630static void __mptcp_wr_shutdown(struct sock *sk)
2631{
2632 struct mptcp_sock *msk = mptcp_sk(sk);
2633
2634 pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
2635 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
2636 !!mptcp_send_head(sk));
2637
2638 /* will be ignored by fallback sockets */
2639 WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
2640 WRITE_ONCE(msk->snd_data_fin_enable, 1);
2641
2642 __mptcp_check_send_data_fin(sk);
2643}
2644
2645static void __mptcp_destroy_sock(struct sock *sk)
Mat Martineauf870fa02020-01-21 16:56:15 -08002646{
Peter Krystadcec37a62020-01-21 16:56:18 -08002647 struct mptcp_subflow_context *subflow, *tmp;
Mat Martineauf870fa02020-01-21 16:56:15 -08002648 struct mptcp_sock *msk = mptcp_sk(sk);
Florian Westphalb2c5b612020-01-29 15:54:45 +01002649 LIST_HEAD(conn_list);
Mat Martineauf870fa02020-01-21 16:56:15 -08002650
Paolo Abenie16163b2020-11-16 10:48:09 +01002651 pr_debug("msk=%p", msk);
Florian Westphal2c22c062020-02-04 18:12:30 +01002652
Florian Westphal3abc05d2021-02-04 15:23:30 -08002653 might_sleep();
2654
Paolo Abeni10f6d462020-05-29 17:43:30 +02002655 /* be sure to always acquire the join list lock, to sync vs
2656 * mptcp_finish_join().
2657 */
2658 spin_lock_bh(&msk->join_list_lock);
2659 list_splice_tail_init(&msk->join_list, &msk->conn_list);
2660 spin_unlock_bh(&msk->join_list_lock);
Florian Westphalb2c5b612020-01-29 15:54:45 +01002661 list_splice_init(&msk->conn_list, &conn_list);
2662
Paolo Abeni6e628cd2020-11-27 11:10:27 +01002663 sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
Paolo Abenie16163b2020-11-16 10:48:09 +01002664 sk_stop_timer(sk, &sk->sk_timer);
2665 msk->pm.status = 0;
Florian Westphalb2c5b612020-01-29 15:54:45 +01002666
2667 list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
Peter Krystadcec37a62020-01-21 16:56:18 -08002668 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
Paolo Abenie16163b2020-11-16 10:48:09 +01002669 __mptcp_close_ssk(sk, ssk, subflow);
Mat Martineauf870fa02020-01-21 16:56:15 -08002670 }
2671
Paolo Abenie16163b2020-11-16 10:48:09 +01002672 sk->sk_prot->destroy(sk);
Paolo Abeni80992012020-02-26 10:14:47 +01002673
Paolo Abenie93da922020-11-27 11:10:23 +01002674 WARN_ON_ONCE(msk->wmem_reserved);
Paolo Abeni87952602020-11-27 11:10:24 +01002675 WARN_ON_ONCE(msk->rmem_released);
Paolo Abenie16163b2020-11-16 10:48:09 +01002676 sk_stream_kill_queues(sk);
2677 xfrm_sk_free_policy(sk);
Florian Westphalaa1fbd92021-04-15 16:45:01 -07002678
Paolo Abenie16163b2020-11-16 10:48:09 +01002679 sk_refcnt_debug_release(sk);
Florian Westphal17aee052021-03-04 13:32:11 -08002680 mptcp_dispose_initial_subflow(msk);
Paolo Abenie16163b2020-11-16 10:48:09 +01002681 sock_put(sk);
2682}
Florian Westphal6771bfd2020-02-26 10:14:48 +01002683
static void mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow;
	bool do_cancel_work = false;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
		inet_sk_state_store(sk, TCP_CLOSE);
		goto cleanup;
	}

	if (mptcp_close_state(sk))
		__mptcp_wr_shutdown(sk);

	sk_stream_wait_close(sk, timeout);

cleanup:
	/* orphan all the subflows */
	inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
	mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast_nested(ssk);

		sock_orphan(ssk);
		unlock_sock_fast(ssk, slow);
	}
	sock_orphan(sk);

	sock_hold(sk);
	pr_debug("msk=%p state=%d", sk, sk->sk_state);
	if (sk->sk_state == TCP_CLOSE) {
		__mptcp_destroy_sock(sk);
		do_cancel_work = true;
	} else {
		sk_reset_timer(sk, &sk->sk_timer, jiffies + TCP_TIMEWAIT_LEN);
	}
	release_sock(sk);
	if (do_cancel_work)
		mptcp_cancel_work(sk);

	if (mptcp_sk(sk)->token)
		mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);

	sock_put(sk);
}

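/* Propagate the addressing info from the given subflow to the msk, so
 * that getsockname()/getpeername() on the MPTCP socket report the
 * (initial) subflow addresses.
 */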
static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
	struct ipv6_pinfo *msk6 = inet6_sk(msk);

	msk->sk_v6_daddr = ssk->sk_v6_daddr;
	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

	if (msk6 && ssk6) {
		msk6->saddr = ssk6->saddr;
		msk6->flow_label = ssk6->flow_label;
	}
#endif

	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}

static int mptcp_disconnect(struct sock *sk, int flags)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_do_flush_join_list(msk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		lock_sock(ssk);
		tcp_disconnect(ssk, flags);
		release_sock(ssk);
	}
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
{
	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);

	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
#endif

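/* Build the msk for a passively established connection by cloning the
 * listener msk. Runs in atomic context: sk_clone_lock() returns the new
 * socket locked, and it is unlocked again at the bottom of this helper.
 */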
struct sock *mptcp_sk_clone(const struct sock *sk,
			    const struct mptcp_options_received *mp_opt,
			    struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
	struct mptcp_sock *msk;
	u64 ack_seq;

	if (!nsk)
		return NULL;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (nsk->sk_family == AF_INET6)
		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif

	__mptcp_init_sock(nsk);

	msk = mptcp_sk(nsk);
	msk->local_key = subflow_req->local_key;
	msk->token = subflow_req->token;
	msk->subflow = NULL;
	WRITE_ONCE(msk->fully_established, false);
	if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
		WRITE_ONCE(msk->csum_enabled, true);

	msk->write_seq = subflow_req->idsn + 1;
	msk->snd_nxt = msk->write_seq;
	msk->snd_una = msk->write_seq;
	msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd;
	msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;

	if (mp_opt->suboptions & OPTIONS_MPTCP_MPC) {
		msk->can_ack = true;
		msk->remote_key = mp_opt->sndr_key;
		mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
		ack_seq++;
		WRITE_ONCE(msk->ack_seq, ack_seq);
		WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
	}

	sock_reset_flag(nsk, SOCK_RCU_FREE);
	/* will be fully established after successful MPC subflow creation */
	inet_sk_state_store(nsk, TCP_SYN_RECV);

	security_inet_csk_clone(nsk, req);
	bh_unlock_sock(nsk);

	/* keep a single reference */
	__sock_put(nsk);
	return nsk;
}

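/* Seed the msk-level receive buffer autotuning from the initial subflow's
 * TCP state, and initialize the peer window end from the subflow's send
 * window.
 */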
void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
{
	const struct tcp_sock *tp = tcp_sk(ssk);

	msk->rcvq_space.copied = 0;
	msk->rcvq_space.rtt_us = 0;

	msk->rcvq_space.time = tp->tcp_mstamp;

	/* initial rcv_space offering made to peer */
	msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
				      TCP_INIT_CWND * tp->advmss);
	if (msk->rcvq_space.space == 0)
		msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;

	WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
}

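/* Accept on the first (MPC) subflow: when the TCP handshake carried MPTCP
 * options, return the owning msk instead of the plain TCP child socket.
 */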
static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
				 bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *listener;
	struct sock *newsk;

	listener = __mptcp_nmpc_socket(msk);
	if (WARN_ON_ONCE(!listener)) {
		*err = -EINVAL;
		return NULL;
	}

	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
	newsk = inet_csk_accept(listener->sk, flags, err, kern);
	if (!newsk)
		return NULL;

	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;

		subflow = mptcp_subflow_ctx(newsk);
		new_mptcp_sock = subflow->conn;

		/* is_mptcp should be false if subflow->conn is missing, see
		 * subflow_syn_recv_sock()
		 */
		if (WARN_ON_ONCE(!new_mptcp_sock)) {
			tcp_sk(newsk)->is_mptcp = 0;
			return newsk;
		}

		/* acquire the 2nd reference for the owning socket */
		sock_hold(new_mptcp_sock);
		newsk = new_mptcp_sock;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
	} else {
		MPTCP_INC_STATS(sock_net(sk),
				MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
	}

	return newsk;
}

void mptcp_destroy_common(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;

	__mptcp_clear_xmit(sk);

	/* move to sk_receive_queue, sk_stream_kill_queues will purge it */
	skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue);

	skb_rbtree_purge(&msk->out_of_order_queue);
	mptcp_token_destroy(msk);
	mptcp_pm_free_anno_list(msk);
}

static void mptcp_destroy(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_destroy_common(msk);
	sk_sockets_allocated_dec(sk);
}

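/* Called from subflow context when snd_una advances: clean up acked data
 * right away if the msk is not owned, otherwise defer the cleanup to
 * mptcp_release_cb().
 */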
void __mptcp_data_acked(struct sock *sk)
{
	if (!sock_owned_by_user(sk))
		__mptcp_clean_una(sk);
	else
		set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags);

	if (mptcp_pending_data_fin_ack(sk))
		mptcp_schedule_work(sk);
}

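/* Try to push pending data on the best available subflow; if the transmit
 * subflow differs from the one triggering this check, delegate the push so
 * that it runs under the proper subflow socket lock.
 */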
void __mptcp_check_push(struct sock *sk, struct sock *ssk)
{
	if (!mptcp_send_head(sk))
		return;

	if (!sock_owned_by_user(sk)) {
		struct sock *xmit_ssk = mptcp_subflow_get_send(mptcp_sk(sk));

		if (xmit_ssk == ssk)
			__mptcp_subflow_push_pending(sk, ssk);
		else if (xmit_ssk)
			mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
	} else {
		set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
	}
}

/* processes deferred events and flushes wmem */
static void mptcp_release_cb(struct sock *sk)
{
	for (;;) {
		unsigned long flags = 0;

		if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags))
			flags |= BIT(MPTCP_PUSH_PENDING);
		if (test_and_clear_bit(MPTCP_RETRANSMIT, &mptcp_sk(sk)->flags))
			flags |= BIT(MPTCP_RETRANSMIT);
		if (!flags)
			break;

		/* the following actions acquire the subflow socket lock
		 *
		 * 1) can't be invoked in atomic scope
		 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
		 *    datapath acquires the msk socket spinlock while holding
		 *    the subflow socket lock
		 */

		spin_unlock_bh(&sk->sk_lock.slock);
		if (flags & BIT(MPTCP_PUSH_PENDING))
			__mptcp_push_pending(sk, 0);
		if (flags & BIT(MPTCP_RETRANSMIT))
			__mptcp_retrans(sk);

		cond_resched();
		spin_lock_bh(&sk->sk_lock.slock);
	}

	/* be sure to set the current sk state before taking actions
	 * depending on sk_state
	 */
	if (test_and_clear_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags))
		__mptcp_set_connected(sk);
	if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
		__mptcp_clean_una_wakeup(sk);
	if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
		__mptcp_error_report(sk);

	/* __mptcp_push_pending() may touch wmem_reserved, so do the related
	 * cleanup last
	 */
	__mptcp_update_wmem(sk);
	__mptcp_update_rmem(sk);
}

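/* Run the action delegated to this subflow: invoked from the MPTCP NAPI
 * poll loop, or from the subflow's release callback, with BH disabled.
 */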
void mptcp_subflow_process_delegated(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_subflow_push_pending(sk, ssk);
	else
		set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
	mptcp_data_unlock(sk);
	mptcp_subflow_delegated_done(subflow);
}

static int mptcp_hash(struct sock *sk)
{
	/* should never be called,
	 * we hash the TCP subflows, not the master socket
	 */
	WARN_ON_ONCE(1);
	return 0;
}

static void mptcp_unhash(struct sock *sk)
{
	/* called from sk_common_release(), but nothing to do here */
}

static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	ssock = __mptcp_nmpc_socket(msk);
	pr_debug("msk=%p, subflow=%p", msk, ssock);
	if (WARN_ON_ONCE(!ssock))
		return -EINVAL;

	return inet_csk_get_port(ssock->sk, snum);
}

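/* Called once the MP_CAPABLE handshake completes on the initial subflow:
 * copy the exchanged keys to the msk and derive the initial data sequence
 * numbers from them.
 */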
void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;
	u64 ack_seq;

	subflow = mptcp_subflow_ctx(ssk);
	sk = subflow->conn;
	msk = mptcp_sk(sk);

	pr_debug("msk=%p, token=%u", sk, subflow->token);

	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
	ack_seq++;
	subflow->map_seq = ack_seq;
	subflow->map_subflow_seq = 1;

	/* the socket is not connected yet, no msk/subflow ops can access or
	 * race with the writes to the fields below
	 */
	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
	WRITE_ONCE(msk->snd_nxt, msk->write_seq);
	WRITE_ONCE(msk->ack_seq, ack_seq);
	WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
	WRITE_ONCE(msk->can_ack, 1);
	WRITE_ONCE(msk->snd_una, msk->write_seq);

	mptcp_pm_new_connection(msk, ssk, 0);

	mptcp_rcv_space_init(msk, ssk);
}

void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}

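/* Attach a fully established MP_JOIN subflow to the msk; returns false and
 * sets a reset reason when the msk can no longer accept new subflows.
 */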
bool mptcp_finish_join(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct sock *parent = (void *)msk;
	struct socket *parent_sock;
	bool ret;

	pr_debug("msk=%p, subflow=%p", msk, subflow);

	/* mptcp socket already closing? */
	if (!mptcp_is_fully_established(parent)) {
		subflow->reset_reason = MPTCP_RST_EMPTCP;
		return false;
	}

	if (!msk->pm.server_side)
		goto out;

	if (!mptcp_pm_allow_new_subflow(msk)) {
		subflow->reset_reason = MPTCP_RST_EPROHIBIT;
		return false;
	}

	/* active connections are already on conn_list, and we can't acquire
	 * msk lock here.
	 * use the join list lock as synchronization point and double-check
	 * msk status to avoid racing with __mptcp_destroy_sock()
	 */
	spin_lock_bh(&msk->join_list_lock);
	ret = inet_sk_state_load(parent) == TCP_ESTABLISHED;
	if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node))) {
		list_add_tail(&subflow->node, &msk->join_list);
		sock_hold(ssk);
	}
	spin_unlock_bh(&msk->join_list_lock);
	if (!ret) {
		subflow->reset_reason = MPTCP_RST_EPROHIBIT;
		return false;
	}

	/* attach to msk socket only after we are sure it will deal with us
	 * at close time
	 */
	parent_sock = READ_ONCE(parent->sk_socket);
	if (parent_sock && !ssk->sk_socket)
		mptcp_sock_graft(ssk, parent_sock);
	subflow->map_seq = READ_ONCE(msk->ack_seq);
out:
	mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
	return true;
}

static void mptcp_shutdown(struct sock *sk, int how)
{
	pr_debug("sk=%p, how=%d", sk, how);

	if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
		__mptcp_wr_shutdown(sk);
}

static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.disconnect	= mptcp_disconnect,
	.close		= mptcp_close,
	.accept		= mptcp_accept,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= mptcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.recvmsg	= mptcp_recvmsg,
	.release_cb	= mptcp_release_cb,
	.hash		= mptcp_hash,
	.unhash		= mptcp_unhash,
	.get_port	= mptcp_get_port,
	.sockets_allocated	= &mptcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.sysctl_mem	= sysctl_tcp_mem,
	.obj_size	= sizeof(struct mptcp_sock),
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
	.no_autobind	= true,
};

static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	err = ssock->ops->bind(ssock, uaddr, addr_len);
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
					 struct mptcp_subflow_context *subflow)
{
	subflow->request_mptcp = 0;
	__mptcp_do_fallback(msk);
}

static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
				int addr_len, int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	if (sock->state != SS_UNCONNECTED && msk->subflow) {
		/* pending connection or invalid state, let existing subflow
		 * cope with that
		 */
		ssock = msk->subflow;
		goto do_connect;
	}

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	mptcp_token_destroy(msk);
	inet_sk_state_store(sock->sk, TCP_SYN_SENT);
	subflow = mptcp_subflow_ctx(ssock->sk);
#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
		mptcp_subflow_early_fallback(msk, subflow);
#endif
	if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) {
		MPTCP_INC_STATS(sock_net(ssock->sk), MPTCP_MIB_TOKENFALLBACKINIT);
		mptcp_subflow_early_fallback(msk, subflow);
	}
	if (likely(!__mptcp_check_fallback(msk)))
		MPTCP_INC_STATS(sock_net(sock->sk), MPTCP_MIB_MPCAPABLEACTIVE);

do_connect:
	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
	sock->state = ssock->state;

	/* on successful connect, the msk state will be moved to established by
	 * subflow_finish_connect()
	 */
	if (!err || err == -EINPROGRESS)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);
	else
		inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	mptcp_token_destroy(msk);
	inet_sk_state_store(sock->sk, TCP_LISTEN);
	sock_set_flag(sock->sk, SOCK_RCU_FREE);

	err = ssock->ops->listen(ssock, backlog);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	if (sock->sk->sk_state != TCP_LISTEN)
		goto unlock_fail;

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock)
		goto unlock_fail;

	clear_bit(MPTCP_DATA_READY, &msk->flags);
	sock_hold(ssock->sk);
	release_sock(sock->sk);

	err = ssock->ops->accept(sock, newsock, flags, kern);
	if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) {
		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
		struct mptcp_subflow_context *subflow;
		struct sock *newsk = newsock->sk;

		lock_sock(newsk);

		/* PM/worker can now acquire the first subflow socket
		 * lock without racing with listener queue cleanup,
		 * we can notify it, if needed.
		 *
		 * Even if remote has reset the initial subflow by now
		 * the refcnt is still at least one.
		 */
		subflow = mptcp_subflow_ctx(msk->first);
		list_add(&subflow->node, &msk->conn_list);
		sock_hold(msk->first);
		if (mptcp_is_fully_established(newsk))
			mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL);

		mptcp_copy_inaddrs(newsk, msk->first);
		mptcp_rcv_space_init(msk, msk->first);
		mptcp_propagate_sndbuf(newsk, msk->first);

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		mptcp_flush_join_list(msk);
		mptcp_for_each_subflow(msk, subflow) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}
		release_sock(newsk);
	}

	if (inet_csk_listen_poll(ssock->sk))
		set_bit(MPTCP_DATA_READY, &msk->flags);
	sock_put(ssock->sk);
	return err;

unlock_fail:
	release_sock(sock->sk);
	return -EINVAL;
}

static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
{
	/* Concurrent splices from sk_receive_queue into receive_queue will
	 * always show at least one non-empty queue when checked in this order.
	 */
	if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) &&
	    skb_queue_empty_lockless(&msk->receive_queue))
		return 0;

	return EPOLLIN | EPOLLRDNORM;
}

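/* Report TX poll readiness following the sk_stream pattern: set the
 * NOSPACE flag before the final writeability re-check, so that a
 * concurrent write-space wakeup cannot be missed.
 */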
static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;

	if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
		return EPOLLOUT | EPOLLWRNORM;

	if (sk_stream_is_writeable(sk))
		return EPOLLOUT | EPOLLWRNORM;

	mptcp_set_nospace(sk);
	smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
	if (sk_stream_is_writeable(sk))
		return EPOLLOUT | EPOLLWRNORM;

	return 0;
}

static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	struct sock *sk = sock->sk;
	struct mptcp_sock *msk;
	__poll_t mask = 0;
	int state;

	msk = mptcp_sk(sk);
	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
	if (state == TCP_LISTEN)
		return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM : 0;

	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
		mask |= mptcp_check_readable(msk);
		mask |= mptcp_check_writeable(msk);
	}
	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (sk->sk_err)
		mask |= EPOLLERR;

	return mask;
}

static const struct proto_ops mptcp_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
};

static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

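/* MPTCP uses a per-CPU dummy NAPI instance to run delegated subflow
 * actions, e.g. data pushes scheduled by __mptcp_check_push(), in softirq
 * context on the same CPU that scheduled them.
 */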
static int mptcp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mptcp_delegated_action *delegated;
	struct mptcp_subflow_context *subflow;
	int work_done = 0;

	delegated = container_of(napi, struct mptcp_delegated_action, napi);
	while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		bh_lock_sock_nested(ssk);
		if (!sock_owned_by_user(ssk) &&
		    mptcp_subflow_has_delegated_action(subflow))
			mptcp_subflow_process_delegated(ssk);
		/* ... elsewhere tcp_release_cb_override already processed
		 * the action or will do so at the next release_sock().
		 * In both cases the subflow must be dequeued here - on the
		 * same CPU that scheduled it.
		 */
		bh_unlock_sock(ssk);
		sock_put(ssk);

		if (++work_done == budget)
			return budget;
	}

	/* always provide a 0 'work_done' argument, so that napi_complete_done
	 * will not try accessing the NULL napi->dev ptr
	 */
	napi_complete_done(napi, 0);
	return work_done;
}

void __init mptcp_proto_init(void)
{
	struct mptcp_delegated_action *delegated;
	int cpu;

	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
		panic("Failed to allocate MPTCP pcpu counter\n");

	init_dummy_netdev(&mptcp_napi_dev);
	for_each_possible_cpu(cpu) {
		delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
		INIT_LIST_HEAD(&delegated->head);
		netif_tx_napi_add(&mptcp_napi_dev, &delegated->napi, mptcp_napi_poll,
				  NAPI_POLL_WEIGHT);
		napi_enable(&delegated->napi);
	}

	mptcp_subflow_init();
	mptcp_pm_init();
	mptcp_token_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);

	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet6_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet6_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet6_sendmsg,
	.recvmsg	   = inet6_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet6_compat_ioctl,
#endif
};

static struct proto mptcp_v6_prot;

static void mptcp_v6_destroy(struct sock *sk)
{
	mptcp_destroy(sk);
	inet6_destroy_sock(sk);
}

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

int __init mptcp_proto_v6_init(void)
{
	int err;

	mptcp_v6_prot = mptcp_prot;
	strcpy(mptcp_v6_prot.name, "MPTCPv6");
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.destroy = mptcp_v6_destroy;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif