// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha2.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

static void mptcp_subflow_ops_undo_override(struct sock *ssk);

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
	tcp_request_sock_ops.destructor(req);
}

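/* MP_JOIN HMAC helper: the two 32-bit nonces are packed big-endian into an
 * 8-byte message and hashed with the given key pair via HMAC-SHA256
 */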
static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

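/* additional subflows can be accepted only after the MP_CAPABLE handshake
 * has completed and while the path manager still allows new subflows
 */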
static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       READ_ONCE(msk->pm.accept_subflow);
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
{
	struct mptcp_sock *msk = subflow_req->msk;
	u8 hmac[SHA256_DIGEST_SIZE];

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
}

static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	return msk;
}

static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);
}

static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
}

/* Init mptcp request socket.
 *
 * Returns an error code if a JOIN has failed and a TCP reset
 * should be sent.
 */
static int subflow_check_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return -EINVAL;
#endif

	mptcp_get_options(skb, &mp_opt);

	if (mp_opt.mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (mp_opt.mp_join)
			return 0;
	} else if (mp_opt.mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (mp_opt.mp_capable && listener->request_mptcp) {
		int err, retries = 4;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
		do {
			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
		} while (subflow_req->local_key == 0);

		if (unlikely(req->syncookie)) {
			mptcp_crypto_key_sha(subflow_req->local_key,
					     &subflow_req->token,
					     &subflow_req->idsn);
			if (mptcp_token_exists(subflow_req->token)) {
				if (retries-- > 0)
					goto again;
			} else {
				subflow_req->mp_capable = 1;
			}
			return 0;
		}

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;
		else if (retries-- > 0)
			goto again;

	} else if (mp_opt.mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req);

		/* Can't fall back to TCP in this case. */
		if (!subflow_req->msk)
			return -EPERM;

		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
			pr_debug("syn inet_sport=%d %d",
				 ntohs(inet_sk(sk_listener)->inet_sport),
				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
				sock_put((struct sock *)subflow_req->msk);
				mptcp_token_destroy_request(req);
				tcp_request_sock_ops.destructor(req);
				subflow_req->msk = NULL;
				subflow_req->mp_join = 0;
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
				return -EPERM;
			}
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
		}

		subflow_req_create_thmac(subflow_req);

		if (unlikely(req->syncookie)) {
			if (mptcp_can_accept_new_subflow(subflow_req->msk))
				subflow_init_req_cookie_join_save(subflow_req, skb);
		}

		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}

	return 0;
}

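/* initialize the MPTCP state of a request socket rebuilt from a SYN cookie:
 * re-parse the MPTCP options carried by the ACK and restore the subflow
 * request data accordingly
 */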
int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	int err;

	subflow_init_req(req, sk_listener);
	mptcp_get_options(skb, &mp_opt);

	if (mp_opt.mp_capable && mp_opt.mp_join)
		return -EINVAL;

	if (mp_opt.mp_capable && listener->request_mptcp) {
		if (mp_opt.sndr_key == 0)
			return -EINVAL;

		subflow_req->local_key = mp_opt.rcvr_key;
		err = mptcp_token_new_request(req);
		if (err)
			return err;

		subflow_req->mp_capable = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	} else if (mp_opt.mp_join && listener->request_mptcp) {
		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
			return -EINVAL;

		if (mptcp_can_accept_new_subflow(subflow_req->msk))
			subflow_req->mp_join = 1;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

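/* wrap the plain TCP route_req() callback: route the request as usual, then
 * validate the MPTCP options and send a reset when a JOIN must be rejected
 */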
static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp_request_sock_ops.send_reset(sk, skb);
	return NULL;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp6_request_sock_ops.send_reset(sk, skb);
	return NULL;
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token,
		 (unsigned long long)thmac,
		 (unsigned long long)subflow->thmac);

	return thmac == subflow->thmac;
}

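/* forcibly close the subflow: send a RST, move it to TCP_CLOSE and let the
 * msk worker perform any remaining cleanup
 */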
void mptcp_subflow_reset(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	/* must hold: tcp_done() could drop last reference on parent */
	sock_hold(sk);

	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	tcp_done(ssk);
	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
	    schedule_work(&mptcp_sk(sk)->work))
		return; /* worker will put sk for us */

	sock_put(sk);
}

static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}

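/* process the SYN-ACK on an actively opened subflow: complete the MP_CAPABLE
 * or MP_JOIN handshake, or fall back to plain TCP / reset the subflow when
 * the peer reply is not acceptable
 */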
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
		inet_sk_state_store(parent, TCP_ESTABLISHED);
		parent->sk_state_change(parent);
	}

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	mptcp_propagate_sndbuf(parent, sk);
	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!mp_opt.mp_capable) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(mptcp_sk(subflow->conn));
			goto fallback;
		}

		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
		mptcp_finish_connect(sk);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!mp_opt.mp_join)
			goto do_reset;

		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
			 subflow->thmac, subflow->remote_nonce);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			goto do_reset;
		}

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);

		if (subflow_use_different_dport(mptcp_sk(parent), sk)) {
			pr_debug("synack inet_dport=%d %d",
				 ntohs(inet_sk(sk)->inet_dport),
				 ntohs(inet_sk(parent)->inet_dport));
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
		}
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
	}
	return;

do_reset:
	mptcp_subflow_reset(sk);
}

struct request_sock_ops mptcp_subflow_request_sock_ops;
EXPORT_SYMBOL_GPL(mptcp_subflow_request_sock_ops);
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;
static struct proto tcpv6_prot_override;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
#endif

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void mptcp_sock_destruct(struct sock *sk)
{
	/* if the new mptcp socket isn't accepted, it is freed
	 * from the tcp listener socket's request queue, linked
	 * from req->sk. The tcp socket is released.
	 * This calls the ULP release function which will
	 * also remove the mptcp socket, via
	 * sock_put(ctx->conn).
	 *
	 * Problem is that the mptcp socket will be in
	 * ESTABLISHED state and will not have the SOCK_DEAD flag.
	 * Both result in warnings from inet_sock_destruct.
	 */

	if (sk->sk_state == TCP_ESTABLISHED) {
		sk->sk_state = TCP_CLOSE;
		WARN_ON_ONCE(sk->sk_socket);
		sock_orphan(sk);
	}

	mptcp_destroy_common(mptcp_sk(sk));
	inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
}

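/* turn the subflow back into a plain TCP socket: detach the MPTCP ULP and
 * restore the original protocol callbacks
 */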
static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;

	mptcp_subflow_ops_undo_override(sk);
}

static void subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	subflow_ulp_fallback(ssk, ctx);
	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	subflow->remote_key = mp_opt->sndr_key;
	subflow->fully_established = 1;
	subflow->can_ack = 1;
	WRITE_ONCE(msk->fully_established, true);
}

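/* create the child socket for a passively opened subflow: clone a new msk
 * for an MP_CAPABLE request, attach the child to the existing msk for an
 * MP_JOIN, or let the child fall back to plain TCP when allowed
 */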
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for 'mp_capable' even when options
	 * are not parsed
	 */
	mp_opt.mp_capable = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
			/* here we can receive and accept an in-window,
			 * out-of-order pkt, which will not carry the MP_CAPABLE
			 * opt even on mptcp enabled paths
			 */
			goto create_msk;
		}

		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_capable) {
			fallback = true;
			goto create_child;
		}

create_msk:
		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_join || !subflow_hmac_valid(req, &mp_opt) ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fallback on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal)
				goto dispose_child;

			subflow_drop_ctx(child);
			goto out;
		}

		if (ctx->mp_capable) {
			/* this can't race with mptcp_close(), as the msk is
			 * not yet exposed to user-space
			 */
			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);

			/* record the newly created socket as the first msk
			 * subflow, but don't link it yet into conn_list
			 */
			WRITE_ONCE(mptcp_sk(new_msk)->first, child);

			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			new_msk->sk_destruct = mptcp_sock_destruct;
			mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
			ctx->conn = new_msk;
			new_msk = NULL;

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.mp_capable)
				mptcp_subflow_fully_established(ctx, &mp_opt);
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = subflow_req->msk;
			if (!owner)
				goto dispose_child;

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;
			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;

			if (subflow_use_different_sport(owner, sk)) {
				pr_debug("ack inet_sport=%d %d",
					 ntohs(inet_sk(sk)->inet_sport),
					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
					SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
					goto out;
				}
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
			}
		}
	}

out:
	/* dispose of the left over mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific;
static struct proto tcp_prot_override;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY
};

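/* expand the 32-bit DSN carried by the DSS option to a full 64-bit sequence
 * number, assuming the new mapping covers data not mapped yet
 */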
static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
{
	if ((u32)seq == (u32)old_seq)
		return old_seq;

	/* Assume map covers data not mapped yet. */
	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}

static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		  ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

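/* returns true when the data still to be consumed in the skb is entirely
 * covered by the current mapping
 */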
static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		warn_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
			     subflow->map_data_len))) {
		/* Mapping only covers past subflow data, invalid */
		warn_bad_map(subflow, ssn + skb->len);
		return false;
	}
	return true;
}

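/* parse the DSS mapping carried by the skb at the head of the subflow
 * receive queue and update the current subflow mapping accordingly
 */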
static enum mapping_status get_mapping_status(struct sock *ssk,
					      struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0 len FIN pkts to the receive
			 * queue, those are the only 0len pkts ever expected here,
			 * and we can admit no mapping only for 0 len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
		 mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
		 mpext->data_len, mpext->data_fin);

	data_len = mpext->data_len;
	if (data_len == 0) {
		pr_err("Infinite mapping not handled");
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
								 mpext->dsn64);
			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				if (updated && schedule_work(&msk->work))
					sock_hold((struct sock *)msk);

				return MAPPING_DATA_FIN;
			}
		} else {
			u64 data_fin_seq = mpext->data_seq + data_len - 1;

			/* If mpext->data_seq is a 32-bit value, data_fin_seq
			 * must also be limited to 32 bits.
			 */
			if (!mpext->dsn64)
				data_fin_seq &= GENMASK_ULL(31, 0);

			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
				 data_fin_seq, mpext->dsn64);
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	if (!mpext->dsn64) {
		map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
				     mpext->data_seq);
		pr_debug("expanded seq=%llu", subflow->map_seq);
	} else {
		map_seq = mpext->data_seq;
	}
	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			return MAPPING_OK;
		}

		/* If the skb data is fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		return MAPPING_OK;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->mpc_map = mpext->mpc_map;
	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len);

validate_seq:
	/* we revalidate the valid mapping on each new skb, because we must
	 * ensure the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb))
		return MAPPING_INVALID;

	skb_ext_del(skb, SKB_EXT_MPTCP);
	return MAPPING_OK;
}

static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
				       u64 limit)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
	u32 incr;

	incr = limit >= skb->len ? skb->len + fin : limit;

	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
		 subflow->map_subflow_seq);
	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
	tcp_sk(ssk)->copied_seq += incr;
	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
		sk_eat_skb(ssk, skb);
	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
		subflow->map_valid = 0;
}

/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;

	if (likely(ssk->sk_state != TCP_CLOSE))
		return;

	if (skb_queue_empty(&ssk->sk_receive_queue) &&
	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
		sock_hold(sk);
		if (!schedule_work(&msk->work))
			sock_put(sk);
	}
}

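/* check whether the skbs queued on the subflow carry data for the msk:
 * refresh the current mapping and flag in-sequence or out-of-order data
 * availability
 */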
static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
		 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
	if (!skb_peek(&ssk->sk_receive_queue))
		subflow->data_avail = 0;
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk, msk);
		pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
		if (status == MAPPING_INVALID) {
			ssk->sk_err = EBADMSG;
			goto fatal;
		}
		if (status == MAPPING_DUMMY) {
			__mptcp_do_fallback(msk);
			skb = skb_peek(&ssk->sk_receive_queue);
			subflow->map_valid = 1;
			subflow->map_seq = READ_ONCE(msk->ack_seq);
			subflow->map_data_len = skb->len;
			subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
						   subflow->ssn_offset;
			subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
			return true;
		}

		if (status != MAPPING_OK)
			goto no_data;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			goto no_data;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map) {
				ssk->sk_err = EBADMSG;
				goto fatal;
			}
			WRITE_ONCE(msk->remote_key, subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (ack_seq == old_ack) {
			subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
			break;
		} else if (after64(ack_seq, old_ack)) {
			subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
			break;
		}

		/* only accept in-sequence mapping. Old values are spurious
		 * retransmissions
		 */
		mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
	}
	return true;

no_data:
	subflow_sched_work_if_closed(msk, ssk);
	return false;
fatal:
	/* fatal protocol error, close the socket */
	/* This barrier is coupled with smp_rmb() in tcp_poll() */
	smp_wmb();
	ssk->sk_error_report(ssk);
	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	subflow->data_avail = 0;
	return false;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		subflow->data_avail = 0;

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	return subflow_check_data_avail(sk);
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as the mptcp peer is concerned, that data is still inflight.
 * DSS ACK is updated when skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = __mptcp_space(sk);
	*full_space = tcp_full_space(sk);
}

static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	u16 state = 1 << inet_sk_state_load(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	msk = mptcp_sk(parent);
	if (state & TCPF_LISTEN) {
		set_bit(MPTCP_DATA_READY, &msk->flags);
		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join && !(state & TCPF_CLOSE));

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
}

static void subflow_write_space(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	mptcp_propagate_sndbuf(sk, ssk);
	mptcp_write_space(sk);
}

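/* walk the subflows and propagate the first pending error to the msk;
 * errors are reported only while connecting (MPC handshake) or after a
 * fallback to plain TCP
 */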
void __mptcp_error_report(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		int err = sock_error(ssk);

		if (!err)
			continue;

		/* only propagate errors on fallen-back sockets or
		 * on MPC connect
		 */
		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
			continue;

		inet_sk_state_store(sk, inet_sk_state_load(ssk));
		sk->sk_err = -err;

		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
		smp_wmb();
		sk->sk_error_report(sk);
		break;
	}
}

static void subflow_error_report(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_error_report(sk);
	else
		set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags);
	mptcp_data_unlock(sk);
}

static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

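/* convert the path manager address info into a sockaddr of the requested
 * family, using a v4-mapped address when an IPv4 address must be carried
 * over an IPv6 socket
 */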
void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
			 struct sockaddr_storage *addr,
			 unsigned short family)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		if (info->family == AF_INET)
			in_addr->sin_addr = info->addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else if (ipv6_addr_v4mapped(&info->addr6))
			in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
#endif
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		if (info->family == AF_INET)
			ipv6_addr_set_v4mapped(info->addr.s_addr,
					       &in6_addr->sin6_addr);
		else
			in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}

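/* create an additional subflow socket for the given msk, bind it to the
 * local address and start the MP_JOIN handshake towards the remote one
 */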
Paolo Abenief0da3b2020-09-14 10:01:15 +02001220int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
Peter Krystadec3edaa2020-03-27 14:48:40 -07001221 const struct mptcp_addr_info *remote)
1222{
1223 struct mptcp_sock *msk = mptcp_sk(sk);
1224 struct mptcp_subflow_context *subflow;
1225 struct sockaddr_storage addr;
Geliang Tang2ff0e562020-09-08 10:49:39 +08001226 int remote_id = remote->id;
Paolo Abeni6bad9122020-06-30 16:38:26 +02001227 int local_id = loc->id;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001228 struct socket *sf;
Paolo Abeni6bad9122020-06-30 16:38:26 +02001229 struct sock *ssk;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001230 u32 remote_token;
1231 int addrlen;
1232 int err;
1233
Paolo Abenib93df082020-07-23 13:02:32 +02001234 if (!mptcp_is_fully_established(sk))
Peter Krystadec3edaa2020-03-27 14:48:40 -07001235 return -ENOTCONN;
1236
1237 err = mptcp_subflow_create_socket(sk, &sf);
1238 if (err)
1239 return err;
1240
Paolo Abeni6bad9122020-06-30 16:38:26 +02001241 ssk = sf->sk;
1242 subflow = mptcp_subflow_ctx(ssk);
1243 do {
1244 get_random_bytes(&subflow->local_nonce, sizeof(u32));
1245 } while (!subflow->local_nonce);
1246
1247 if (!local_id) {
1248 err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
1249 if (err < 0)
1250 goto failed;
1251
1252 local_id = err;
1253 }
1254
Peter Krystadec3edaa2020-03-27 14:48:40 -07001255 subflow->remote_key = msk->remote_key;
1256 subflow->local_key = msk->local_key;
1257 subflow->token = msk->token;
Matthieu Baerts50a13bc2021-01-25 10:59:00 -08001258 mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
Peter Krystadec3edaa2020-03-27 14:48:40 -07001259
1260 addrlen = sizeof(struct sockaddr_in);
1261#if IS_ENABLED(CONFIG_MPTCP_IPV6)
Matthieu Baerts50a13bc2021-01-25 10:59:00 -08001262 if (addr.ss_family == AF_INET6)
Peter Krystadec3edaa2020-03-27 14:48:40 -07001263 addrlen = sizeof(struct sockaddr_in6);
1264#endif
Paolo Abenief0da3b2020-09-14 10:01:15 +02001265 ssk->sk_bound_dev_if = loc->ifindex;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001266 err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
1267 if (err)
1268 goto failed;
1269
1270 mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
Geliang Tang2ff0e562020-09-08 10:49:39 +08001271 pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
1272 remote_token, local_id, remote_id);
Peter Krystadec3edaa2020-03-27 14:48:40 -07001273 subflow->remote_token = remote_token;
Paolo Abeni6bad9122020-06-30 16:38:26 +02001274 subflow->local_id = local_id;
Geliang Tang2ff0e562020-09-08 10:49:39 +08001275 subflow->remote_id = remote_id;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001276 subflow->request_join = 1;
Paolo Abeni4596a2c2020-09-14 10:01:16 +02001277 subflow->request_bkup = !!(loc->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
Matthieu Baerts50a13bc2021-01-25 10:59:00 -08001278 mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
Peter Krystadec3edaa2020-03-27 14:48:40 -07001279
Paolo Abeni5b950ff2020-12-09 12:03:29 +01001280 mptcp_add_pending_subflow(msk, subflow);
Peter Krystadec3edaa2020-03-27 14:48:40 -07001281 err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
1282 if (err && err != -EINPROGRESS)
Paolo Abeni5b950ff2020-12-09 12:03:29 +01001283 goto failed_unlink;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001284
Paolo Abeni866f26f2021-01-20 15:39:10 +01001285 /* discard the subflow socket */
1286 mptcp_sock_graft(ssk, sk->sk_socket);
1287 iput(SOCK_INODE(sf));
Peter Krystadec3edaa2020-03-27 14:48:40 -07001288 return err;
1289
Paolo Abeni5b950ff2020-12-09 12:03:29 +01001290failed_unlink:
Peter Krystadec3edaa2020-03-27 14:48:40 -07001291 spin_lock_bh(&msk->join_list_lock);
Paolo Abeni5b950ff2020-12-09 12:03:29 +01001292 list_del(&subflow->node);
Peter Krystadec3edaa2020-03-27 14:48:40 -07001293 spin_unlock_bh(&msk->join_list_lock);
1294
Peter Krystadec3edaa2020-03-27 14:48:40 -07001295failed:
Paolo Abenie16163b2020-11-16 10:48:09 +01001296 subflow->disposable = 1;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001297 sock_release(sf);
1298 return err;
1299}
1300
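/* Ensure a subflow socket created from kernel context is charged to the
 * same cgroup (and memcg) as its parent MPTCP socket.
 */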
Nicolas Rybowski3764b0c2020-12-10 14:24:58 -08001301static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
1302{
1303#ifdef CONFIG_SOCK_CGROUP_DATA
1304 struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
1305 *child_skcd = &child->sk_cgrp_data;
1306
1307 /* only the additional subflows created by kworkers have to be modified */
1308 if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
1309 cgroup_id(sock_cgroup_ptr(child_skcd))) {
1310#ifdef CONFIG_MEMCG
1311 struct mem_cgroup *memcg = parent->sk_memcg;
1312
1313 mem_cgroup_sk_free(child);
1314 if (memcg && css_tryget(&memcg->css))
1315 child->sk_memcg = memcg;
1316#endif /* CONFIG_MEMCG */
1317
1318 cgroup_sk_free(child_skcd);
1319 *child_skcd = *parent_skcd;
1320 cgroup_sk_clone(child_skcd);
1321 }
1322#endif /* CONFIG_SOCK_CGROUP_DATA */
1323}
1324
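/* Swap in the proto ops copy whose release_cb also runs any pending
 * delegated MPTCP actions for this subflow (see tcp_release_cb_override()).
 */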
Paolo Abenib19bc292021-01-20 15:39:14 +01001325static void mptcp_subflow_ops_override(struct sock *ssk)
1326{
1327#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1328 if (ssk->sk_prot == &tcpv6_prot)
1329 ssk->sk_prot = &tcpv6_prot_override;
1330 else
1331#endif
1332 ssk->sk_prot = &tcp_prot_override;
1333}
1334
1335static void mptcp_subflow_ops_undo_override(struct sock *ssk)
1336{
1337#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1338 if (ssk->sk_prot == &tcpv6_prot_override)
1339 ssk->sk_prot = &tcpv6_prot;
1340 else
1341#endif
1342 ssk->sk_prot = &tcp_prot;
1343}
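/* Create a kernel TCP socket in the same netns and cgroup as the MPTCP
 * socket @sk and attach the "mptcp" ULP to it, turning it into a subflow
 * owned by @sk.
 */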
Peter Krystad2303f992020-01-21 16:56:17 -08001344int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
1345{
1346 struct mptcp_subflow_context *subflow;
1347 struct net *net = sock_net(sk);
1348 struct socket *sf;
1349 int err;
1350
Paolo Abeniadf73412020-08-04 18:31:06 +02001351	/* un-accepted server sockets can reach here - on bad configuration,
1352	 * bail early to avoid greater trouble later
1353 */
1354 if (unlikely(!sk->sk_socket))
1355 return -EINVAL;
1356
Peter Krystadcec37a62020-01-21 16:56:18 -08001357 err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
1358 &sf);
Peter Krystad2303f992020-01-21 16:56:17 -08001359 if (err)
1360 return err;
1361
1362 lock_sock(sf->sk);
1363
Nicolas Rybowski3764b0c2020-12-10 14:24:58 -08001364 /* the newly created socket has to be in the same cgroup as its parent */
1365 mptcp_attach_cgroup(sk, sf->sk);
1366
Peter Krystad2303f992020-01-21 16:56:17 -08001367	/* kernel sockets do not acquire a net ref by default, but the TCP timer
1368	 * needs one.
1369 */
1370 sf->sk->sk_net_refcnt = 1;
1371 get_net(net);
David S. Millerf6f7d8c2020-01-29 10:39:23 +01001372#ifdef CONFIG_PROC_FS
Peter Krystad2303f992020-01-21 16:56:17 -08001373 this_cpu_add(*net->core.sock_inuse, 1);
David S. Millerf6f7d8c2020-01-29 10:39:23 +01001374#endif
Peter Krystad2303f992020-01-21 16:56:17 -08001375 err = tcp_set_ulp(sf->sk, "mptcp");
1376 release_sock(sf->sk);
1377
Wei Yongjunb8ad5402020-06-15 09:35:22 +08001378 if (err) {
1379 sock_release(sf);
Peter Krystad2303f992020-01-21 16:56:17 -08001380 return err;
Wei Yongjunb8ad5402020-06-15 09:35:22 +08001381 }
Peter Krystad2303f992020-01-21 16:56:17 -08001382
Paolo Abeni7d14b0d2020-05-07 18:53:24 +02001383	/* the newly created socket really belongs to the owning MPTCP master
1384	 * socket, even if, for additional subflows, the allocation is performed
1385	 * by a kernel workqueue. Adjust the inode references, so that the
1386	 * procfs/diag interfaces really show this one belonging to the correct
1387	 * user.
1388 */
1389 SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
1390 SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
1391 SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
1392
Peter Krystad2303f992020-01-21 16:56:17 -08001393 subflow = mptcp_subflow_ctx(sf->sk);
1394 pr_debug("subflow=%p", subflow);
1395
1396 *new_sock = sf;
Peter Krystad79c09492020-01-21 16:56:20 -08001397 sock_hold(sk);
Peter Krystad2303f992020-01-21 16:56:17 -08001398 subflow->conn = sk;
Paolo Abenib19bc292021-01-20 15:39:14 +01001399 mptcp_subflow_ops_override(sf->sk);
Peter Krystad2303f992020-01-21 16:56:17 -08001400
1401 return 0;
1402}
1403
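/* Allocate the MPTCP subflow context for @sk and install it as the
 * socket's ULP data.
 */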
1404static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
1405 gfp_t priority)
1406{
1407 struct inet_connection_sock *icsk = inet_csk(sk);
1408 struct mptcp_subflow_context *ctx;
1409
1410 ctx = kzalloc(sizeof(*ctx), priority);
1411 if (!ctx)
1412 return NULL;
1413
1414 rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
Peter Krystadcec37a62020-01-21 16:56:18 -08001415 INIT_LIST_HEAD(&ctx->node);
Paolo Abenib19bc292021-01-20 15:39:14 +01001416 INIT_LIST_HEAD(&ctx->delegated_node);
Peter Krystad2303f992020-01-21 16:56:17 -08001417
1418 pr_debug("subflow=%p", ctx);
1419
1420 ctx->tcp_sock = sk;
1421
1422 return ctx;
1423}
1424
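/* Wake up any process sleeping on the subflow's wait queue. */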
Mat Martineau648ef4b2020-01-21 16:56:24 -08001425static void __subflow_state_change(struct sock *sk)
1426{
1427 struct socket_wq *wq;
1428
1429 rcu_read_lock();
1430 wq = rcu_dereference(sk->sk_wq);
1431 if (skwq_has_sleeper(wq))
1432 wake_up_interruptible_all(&wq->wait);
1433 rcu_read_unlock();
1434}
1435
1436static bool subflow_is_done(const struct sock *sk)
1437{
1438 return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
1439}
1440
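/* sk_state_change callback installed on the subflow: handle fallback on
 * simultaneous connect and propagate data-ready, close and EOF events to
 * the MPTCP parent socket.
 */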
1441static void subflow_state_change(struct sock *sk)
1442{
1443 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
Paolo Abenidc093db2020-03-13 16:52:42 +01001444 struct sock *parent = subflow->conn;
Mat Martineau648ef4b2020-01-21 16:56:24 -08001445
1446 __subflow_state_change(sk);
1447
Davide Caratti8fd73802020-06-29 22:26:21 +02001448 if (subflow_simultaneous_connect(sk)) {
Paolo Abeni5cf92bb2021-01-20 15:39:11 +01001449 mptcp_propagate_sndbuf(parent, sk);
Davide Caratti8fd73802020-06-29 22:26:21 +02001450 mptcp_do_fallback(sk);
Florian Westphala6b118f2020-06-30 21:24:45 +02001451 mptcp_rcv_space_init(mptcp_sk(parent), sk);
Davide Caratti8fd73802020-06-29 22:26:21 +02001452 pr_fallback(mptcp_sk(parent));
1453 subflow->conn_finished = 1;
1454 if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
1455 inet_sk_state_store(parent, TCP_ESTABLISHED);
1456 parent->sk_state_change(parent);
1457 }
1458 }
1459
Mat Martineau648ef4b2020-01-21 16:56:24 -08001460	/* as recvmsg() does not acquire the subflow socket for ssk selection,
1461	 * a FIN packet carrying a DSS can go unnoticed if we don't trigger
1462 * the data available machinery here.
1463 */
Davide Carattie1ff9e82020-06-29 22:26:20 +02001464 if (mptcp_subflow_data_available(sk))
Florian Westphal2e522132020-02-26 10:14:51 +01001465 mptcp_data_ready(parent, sk);
Mat Martineau648ef4b2020-01-21 16:56:24 -08001466
Florian Westphal40947e12021-02-12 15:59:56 -08001467 subflow_sched_work_if_closed(mptcp_sk(parent), sk);
1468
Mat Martineau067a0b32020-07-28 15:12:07 -07001469 if (__mptcp_check_fallback(mptcp_sk(parent)) &&
Mat Martineau648ef4b2020-01-21 16:56:24 -08001470 !subflow->rx_eof && subflow_is_done(sk)) {
1471 subflow->rx_eof = 1;
Florian Westphal59832e22020-04-02 13:44:52 +02001472 mptcp_subflow_eof(parent);
Mat Martineau648ef4b2020-01-21 16:56:24 -08001473 }
1474}
1475
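/* ULP init callback: attach a subflow context to the (kernel-created)
 * TCP socket and redirect its data_ready/write_space/state_change/
 * error_report callbacks to the MPTCP handlers.
 */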
Peter Krystad2303f992020-01-21 16:56:17 -08001476static int subflow_ulp_init(struct sock *sk)
1477{
Peter Krystadcec37a62020-01-21 16:56:18 -08001478 struct inet_connection_sock *icsk = inet_csk(sk);
Peter Krystad2303f992020-01-21 16:56:17 -08001479 struct mptcp_subflow_context *ctx;
1480 struct tcp_sock *tp = tcp_sk(sk);
1481 int err = 0;
1482
1483 /* disallow attaching ULP to a socket unless it has been
1484 * created with sock_create_kern()
1485 */
1486 if (!sk->sk_kern_sock) {
1487 err = -EOPNOTSUPP;
1488 goto out;
1489 }
1490
1491 ctx = subflow_create_ctx(sk, GFP_KERNEL);
1492 if (!ctx) {
1493 err = -ENOMEM;
1494 goto out;
1495 }
1496
1497 pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
1498
1499 tp->is_mptcp = 1;
Peter Krystadcec37a62020-01-21 16:56:18 -08001500 ctx->icsk_af_ops = icsk->icsk_af_ops;
1501 icsk->icsk_af_ops = subflow_default_af_ops(sk);
Mat Martineau648ef4b2020-01-21 16:56:24 -08001502 ctx->tcp_data_ready = sk->sk_data_ready;
1503 ctx->tcp_state_change = sk->sk_state_change;
1504 ctx->tcp_write_space = sk->sk_write_space;
Paolo Abeni15cc1042021-02-11 15:30:37 -08001505 ctx->tcp_error_report = sk->sk_error_report;
Mat Martineau648ef4b2020-01-21 16:56:24 -08001506 sk->sk_data_ready = subflow_data_ready;
1507 sk->sk_write_space = subflow_write_space;
1508 sk->sk_state_change = subflow_state_change;
Paolo Abeni15cc1042021-02-11 15:30:37 -08001509 sk->sk_error_report = subflow_error_report;
Peter Krystad2303f992020-01-21 16:56:17 -08001510out:
1511 return err;
1512}
1513
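/* ULP release callback: drop the reference on the owning MPTCP socket,
 * restore the original proto ops and free the context, unless it must
 * stay alive until __mptcp_close_ssk() runs.
 */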
Paolo Abenie16163b2020-11-16 10:48:09 +01001514static void subflow_ulp_release(struct sock *ssk)
Peter Krystad2303f992020-01-21 16:56:17 -08001515{
Paolo Abenie16163b2020-11-16 10:48:09 +01001516 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
1517 bool release = true;
1518 struct sock *sk;
Peter Krystad2303f992020-01-21 16:56:17 -08001519
1520 if (!ctx)
1521 return;
1522
Paolo Abenie16163b2020-11-16 10:48:09 +01001523 sk = ctx->conn;
1524 if (sk) {
1525		/* if the msk has been orphaned, keep the ctx
Paolo Abeni0597d0f2020-12-09 12:03:30 +01001526		 * alive; it will be freed by __mptcp_close_ssk()
1527		 * when the subflow is still unaccepted
Paolo Abenie16163b2020-11-16 10:48:09 +01001528 */
Paolo Abeni0597d0f2020-12-09 12:03:30 +01001529 release = ctx->disposable || list_empty(&ctx->node);
Paolo Abenie16163b2020-11-16 10:48:09 +01001530 sock_put(sk);
1531 }
Peter Krystad79c09492020-01-21 16:56:20 -08001532
Paolo Abenib19bc292021-01-20 15:39:14 +01001533 mptcp_subflow_ops_undo_override(ssk);
Paolo Abenie16163b2020-11-16 10:48:09 +01001534 if (release)
1535 kfree_rcu(ctx, rcu);
Peter Krystad2303f992020-01-21 16:56:17 -08001536}
1537
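/* ULP clone callback: set up the subflow context on the newly created
 * child socket, carrying over the MPTCP state negotiated on the request
 * socket, or fall back to plain TCP when the request is not MPTCP.
 */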
Peter Krystadcec37a62020-01-21 16:56:18 -08001538static void subflow_ulp_clone(const struct request_sock *req,
1539 struct sock *newsk,
1540 const gfp_t priority)
1541{
1542 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
1543 struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
1544 struct mptcp_subflow_context *new_ctx;
1545
Peter Krystadf2962342020-03-27 14:48:39 -07001546 if (!tcp_rsk(req)->is_mptcp ||
1547 (!subflow_req->mp_capable && !subflow_req->mp_join)) {
Mat Martineau648ef4b2020-01-21 16:56:24 -08001548 subflow_ulp_fallback(newsk, old_ctx);
Peter Krystadcec37a62020-01-21 16:56:18 -08001549 return;
1550 }
1551
1552 new_ctx = subflow_create_ctx(newsk, priority);
Mat Martineauedc7e482020-01-24 16:04:03 -08001553 if (!new_ctx) {
Mat Martineau648ef4b2020-01-21 16:56:24 -08001554 subflow_ulp_fallback(newsk, old_ctx);
Peter Krystadcec37a62020-01-21 16:56:18 -08001555 return;
1556 }
1557
1558 new_ctx->conn_finished = 1;
1559 new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
Mat Martineau648ef4b2020-01-21 16:56:24 -08001560 new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
1561 new_ctx->tcp_state_change = old_ctx->tcp_state_change;
1562 new_ctx->tcp_write_space = old_ctx->tcp_write_space;
Paolo Abeni15cc1042021-02-11 15:30:37 -08001563 new_ctx->tcp_error_report = old_ctx->tcp_error_report;
Paolo Abeni58b09912020-03-13 16:52:41 +01001564 new_ctx->rel_write_seq = 1;
1565 new_ctx->tcp_sock = newsk;
1566
Peter Krystadf2962342020-03-27 14:48:39 -07001567 if (subflow_req->mp_capable) {
1568		/* see comments in subflow_syn_recv_sock(): the MPTCP connection
1569		 * is fully established only after we receive the remote key
1570 */
1571 new_ctx->mp_capable = 1;
Peter Krystadf2962342020-03-27 14:48:39 -07001572 new_ctx->local_key = subflow_req->local_key;
1573 new_ctx->token = subflow_req->token;
1574 new_ctx->ssn_offset = subflow_req->ssn_offset;
1575 new_ctx->idsn = subflow_req->idsn;
1576 } else if (subflow_req->mp_join) {
Peter Krystadec3edaa2020-03-27 14:48:40 -07001577 new_ctx->ssn_offset = subflow_req->ssn_offset;
Peter Krystadf2962342020-03-27 14:48:39 -07001578 new_ctx->mp_join = 1;
1579 new_ctx->fully_established = 1;
1580 new_ctx->backup = subflow_req->backup;
1581 new_ctx->local_id = subflow_req->local_id;
Geliang Tang2ff0e562020-09-08 10:49:39 +08001582 new_ctx->remote_id = subflow_req->remote_id;
Peter Krystadf2962342020-03-27 14:48:39 -07001583 new_ctx->token = subflow_req->token;
1584 new_ctx->thmac = subflow_req->thmac;
1585 }
Peter Krystadcec37a62020-01-21 16:56:18 -08001586}
1587
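/* release_cb for subflows with overridden proto ops: process any pending
 * delegated MPTCP action before running the plain TCP release callback.
 */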
Paolo Abenib19bc292021-01-20 15:39:14 +01001588static void tcp_release_cb_override(struct sock *ssk)
1589{
1590 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1591
1592 if (mptcp_subflow_has_delegated_action(subflow))
1593 mptcp_subflow_process_delegated(ssk);
1594
1595 tcp_release_cb(ssk);
1596}
1597
Peter Krystad2303f992020-01-21 16:56:17 -08001598static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
1599 .name = "mptcp",
1600 .owner = THIS_MODULE,
1601 .init = subflow_ulp_init,
1602 .release = subflow_ulp_release,
Peter Krystadcec37a62020-01-21 16:56:18 -08001603 .clone = subflow_ulp_clone,
Peter Krystad2303f992020-01-21 16:56:17 -08001604};
1605
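/* Initialize the request_sock_ops used for MPTCP subflow requests:
 * dedicated slab cache and destructor.
 */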
Peter Krystadcec37a62020-01-21 16:56:18 -08001606static int subflow_ops_init(struct request_sock_ops *subflow_ops)
1607{
1608 subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
1609 subflow_ops->slab_name = "request_sock_subflow";
1610
1611 subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
1612 subflow_ops->obj_size, 0,
1613 SLAB_ACCOUNT |
1614 SLAB_TYPESAFE_BY_RCU,
1615 NULL);
1616 if (!subflow_ops->slab)
1617 return -ENOMEM;
1618
Peter Krystad79c09492020-01-21 16:56:20 -08001619 subflow_ops->destructor = subflow_req_destructor;
1620
Peter Krystadcec37a62020-01-21 16:56:18 -08001621 return 0;
1622}
1623
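/* Boot-time init: set up the subflow request sock ops, the af-specific
 * ops and proto overrides, and register the "mptcp" ULP.
 */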
Paolo Abenid39dcec2020-06-26 19:29:59 +02001624void __init mptcp_subflow_init(void)
Peter Krystad2303f992020-01-21 16:56:17 -08001625{
Florian Westphal08b8d082020-07-30 21:25:53 +02001626 mptcp_subflow_request_sock_ops = tcp_request_sock_ops;
1627 if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0)
Peter Krystadcec37a62020-01-21 16:56:18 -08001628 panic("MPTCP: failed to init subflow request sock ops\n");
1629
1630 subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
Florian Westphal7ea851d2020-11-30 16:36:30 +01001631 subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
Peter Krystadcec37a62020-01-21 16:56:18 -08001632
1633 subflow_specific = ipv4_specific;
1634 subflow_specific.conn_request = subflow_v4_conn_request;
1635 subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
1636 subflow_specific.sk_rx_dst_set = subflow_finish_connect;
1637
Paolo Abenib19bc292021-01-20 15:39:14 +01001638 tcp_prot_override = tcp_prot;
1639 tcp_prot_override.release_cb = tcp_release_cb_override;
1640
Peter Krystadcec37a62020-01-21 16:56:18 -08001641#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1642 subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
Florian Westphal7ea851d2020-11-30 16:36:30 +01001643 subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
Peter Krystadcec37a62020-01-21 16:56:18 -08001644
1645 subflow_v6_specific = ipv6_specific;
1646 subflow_v6_specific.conn_request = subflow_v6_conn_request;
1647 subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
1648 subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
1649
1650 subflow_v6m_specific = subflow_v6_specific;
1651 subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
1652 subflow_v6m_specific.send_check = ipv4_specific.send_check;
1653 subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
1654 subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
1655 subflow_v6m_specific.net_frag_header_len = 0;
Paolo Abenib19bc292021-01-20 15:39:14 +01001656
1657 tcpv6_prot_override = tcpv6_prot;
1658 tcpv6_prot_override.release_cb = tcp_release_cb_override;
Peter Krystadcec37a62020-01-21 16:56:18 -08001659#endif
1660
Davide Caratti5147dfb2020-03-27 14:48:49 -07001661 mptcp_diag_subflow_init(&subflow_ulp_ops);
1662
Peter Krystad2303f992020-01-21 16:56:17 -08001663 if (tcp_register_ulp(&subflow_ulp_ops) != 0)
1664 panic("MPTCP: failed to register subflows to ULP\n");
1665}