// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
	tcp_request_sock_ops.destructor(req);
}

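/* Helper shared by the MP_JOIN SYN-ACK and third-ACK paths: the two 32-bit
 * nonces are packed big-endian into an 8-byte message and HMAC'd with the
 * peers' keys; callers truncate the SHA-256 output as needed (e.g. to the
 * 64-bit thmac carried in the SYN-ACK).
 */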
static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       READ_ONCE(msk->pm.accept_subflow);
}

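/* Note: mptcp_token_get_sock() returns the msk with a reference held; on
 * success that reference is stored in subflow_req->msk and is either
 * released in subflow_req_destructor() or handed over to the new subflow.
 */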
/* validate received token and create truncated hmac and nonce for SYN-ACK */
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
						     const struct sk_buff *skb)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
	return msk;
}

static int __subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return -EINVAL;
#endif

	return 0;
}

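/* For syncookie SYNs no request socket is retained, so the token cannot be
 * inserted in the token tree here: the generated key is only checked against
 * existing tokens (see mptcp_token_exists() below) and the real insertion is
 * deferred until the cookie ACK is validated. Key generation is retried a
 * few times on collision.
 */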
static void subflow_init_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	int ret;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

	ret = __subflow_init_req(req, sk_listener);
	if (ret)
		return;

	mptcp_get_options(skb, &mp_opt);

	if (mp_opt.mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (mp_opt.mp_join)
			return;
	} else if (mp_opt.mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (mp_opt.mp_capable && listener->request_mptcp) {
		int err, retries = 4;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
		do {
			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
		} while (subflow_req->local_key == 0);

		if (unlikely(req->syncookie)) {
			mptcp_crypto_key_sha(subflow_req->local_key,
					     &subflow_req->token,
					     &subflow_req->idsn);
			if (mptcp_token_exists(subflow_req->token)) {
				if (retries-- > 0)
					goto again;
			} else {
				subflow_req->mp_capable = 1;
			}
			return;
		}

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;
		else if (retries-- > 0)
			goto again;

	} else if (mp_opt.mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req, skb);

		if (unlikely(req->syncookie) && subflow_req->msk) {
			if (mptcp_can_accept_new_subflow(subflow_req->msk))
				subflow_init_req_cookie_join_save(subflow_req, skb);
		}

		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}
}

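/* Initialize a request socket re-built from a valid syncookie ACK: only the
 * ACK is available at this point, so ssn_offset is derived from
 * TCP_SKB_CB(skb)->seq - 1, i.e. the sequence number of the original SYN.
 */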
int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	int err;

	err = __subflow_init_req(req, sk_listener);
	if (err)
		return err;

	mptcp_get_options(skb, &mp_opt);

	if (mp_opt.mp_capable && mp_opt.mp_join)
		return -EINVAL;

	if (mp_opt.mp_capable && listener->request_mptcp) {
		if (mp_opt.sndr_key == 0)
			return -EINVAL;

		subflow_req->local_key = mp_opt.rcvr_key;
		err = mptcp_token_new_request(req);
		if (err)
			return err;

		subflow_req->mp_capable = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	} else if (mp_opt.mp_join && listener->request_mptcp) {
		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
			return -EINVAL;

		if (mptcp_can_accept_new_subflow(subflow_req->msk))
			subflow_req->mp_join = 1;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

static void subflow_v4_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static void subflow_v6_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token,
		 (unsigned long long)thmac,
		 (unsigned long long)subflow->thmac);

	return thmac == subflow->thmac;
}

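/* Invoked on SYN-ACK reception (installed as sk_rx_dst_set): completes
 * either the MP_CAPABLE handshake (first subflow), the MP_JOIN handshake
 * (additional subflow, validated via the truncated HMAC above), or falls
 * back to plain TCP when the peer did not echo the requested option.
 */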
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
		inet_sk_state_store(parent, TCP_ESTABLISHED);
		parent->sk_state_change(parent);
	}

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!mp_opt.mp_capable) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(mptcp_sk(subflow->conn));
			goto fallback;
		}

		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
		mptcp_finish_connect(sk);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!mp_opt.mp_join)
			goto do_reset;

		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
			 subflow->thmac, subflow->remote_nonce);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			goto do_reset;
		}

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
	}
	return;

do_reset:
	tcp_send_active_reset(sk, GFP_ATOMIC);
	tcp_done(sk);
}

struct request_sock_ops mptcp_subflow_request_sock_ops;
EXPORT_SYMBOL_GPL(mptcp_subflow_request_sock_ops);
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
#endif

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void mptcp_sock_destruct(struct sock *sk)
{
	/* if new mptcp socket isn't accepted, it is free'd
	 * from the tcp listener sockets request queue, linked
	 * from req->sk. The tcp socket is released.
	 * This calls the ULP release function which will
	 * also remove the mptcp socket, via
	 * sock_put(ctx->conn).
	 *
	 * Problem is that the mptcp socket will be in
	 * ESTABLISHED state and will not have the SOCK_DEAD flag.
	 * Both result in warnings from inet_sock_destruct.
	 */

	if (sk->sk_state == TCP_ESTABLISHED) {
		sk->sk_state = TCP_CLOSE;
		WARN_ON_ONCE(sk->sk_socket);
		sock_orphan(sk);
	}

	mptcp_token_destroy(mptcp_sk(sk));
	inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;
}

static void subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	subflow_ulp_fallback(ssk, ctx);
	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	subflow->remote_key = mp_opt->sndr_key;
	subflow->fully_established = 1;
	subflow->can_ack = 1;
	WRITE_ONCE(msk->fully_established, true);
}

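/* Note on fallback handling below: a plain MP_CAPABLE subflow may silently
 * degrade to regular TCP ("fallback"), while for MP_JOIN subflows fallback
 * is fatal and the child is disposed of with a reset.
 */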
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for 'mp_capable' even when options
	 * are not parsed
	 */
	mp_opt.mp_capable = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
			/* here we can receive and accept an in-window,
			 * out-of-order pkt, which will not carry the MP_CAPABLE
			 * opt even on mptcp enabled paths
			 */
			goto create_msk;
		}

		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_capable) {
			fallback = true;
			goto create_child;
		}

create_msk:
		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_join ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk) ||
		    !subflow_hmac_valid(req, &mp_opt)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fallback on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal)
				goto dispose_child;

			subflow_drop_ctx(child);
			goto out;
		}

		if (ctx->mp_capable) {
			/* this can't race with mptcp_close(), as the msk is
			 * not yet exposed to user-space
			 */
			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);

			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			new_msk->sk_destruct = mptcp_sock_destruct;
			mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
			ctx->conn = new_msk;
			new_msk = NULL;

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.mp_capable)
				mptcp_subflow_fully_established(ctx, &mp_opt);
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = subflow_req->msk;
			if (!owner)
				goto dispose_child;

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;
			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

out:
	/* dispose of the left over mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY
};

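/* Rebuild a 64-bit DSN from the 32-bit value carried in the DSS option,
 * assuming the new mapping starts at or after the end of the previous one:
 * the upper 32 bits are borrowed from (old_seq + old_data_len + 1).
 * Example: old_seq=0x1fffffff0, old_data_len=0x10, seq=0x0 expands to
 * 0x200000000.
 */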
static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
{
	if ((u32)seq == (u32)old_seq)
		return old_seq;

	/* Assume map covers data not mapped yet. */
	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}

static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		  ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		warn_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping only covers past subflow data, invalid */
		warn_bad_map(subflow, ssn + skb->len);
		return false;
	}
	return true;
}

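/* Parse and validate the DSS mapping carried by the skb at the head of the
 * subflow receive queue. A DATA_FIN consumes one byte of MPTCP sequence
 * space, hence the data_len-- below; MAPPING_DUMMY tells the caller the
 * connection has fallen back and the data must be treated as plain TCP.
 */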
static enum mapping_status get_mapping_status(struct sock *ssk,
					      struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0-len FIN packets to the
			 * receive queue; those are the only 0-len packets
			 * ever expected here, and we can admit no mapping
			 * only for 0-len packets
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
		 mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
		 mpext->data_len, mpext->data_fin);

	data_len = mpext->data_len;
	if (data_len == 0) {
		pr_err("Infinite mapping not handled");
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			mptcp_update_rcv_data_fin(msk, mpext->data_seq);
			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				return MAPPING_DATA_FIN;
			}
		} else {
			mptcp_update_rcv_data_fin(msk, mpext->data_seq + data_len);
			pr_debug("DATA_FIN with mapping seq=%llu", mpext->data_seq + data_len);
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	if (!mpext->dsn64) {
		map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
				     mpext->data_seq);
		subflow->use_64bit_ack = 0;
		pr_debug("expanded seq=%llu", subflow->map_seq);
	} else {
		map_seq = mpext->data_seq;
		subflow->use_64bit_ack = 1;
	}

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			return MAPPING_OK;
		}

		/* If this skb data are fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		return MAPPING_OK;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->mpc_map = mpext->mpc_map;
	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len);

validate_seq:
	/* we revalidate valid mapping on new skb, because we must ensure
	 * the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb))
		return MAPPING_INVALID;

	skb_ext_del(skb, SKB_EXT_MPTCP);
	return MAPPING_OK;
}

static int subflow_read_actor(read_descriptor_t *desc,
			      struct sk_buff *skb,
			      unsigned int offset, size_t len)
{
	size_t copy_len = min(desc->count, len);

	desc->count -= copy_len;

	pr_debug("flushed %zu bytes, %zu left", copy_len, desc->count);
	return copy_len;
}

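/* Out-of-sequence mappings (data already acked at MPTCP level, or "future"
 * data seen after an active backup switch) are discarded at subflow level
 * via tcp_read_sock() with the no-op actor above, so they never reach the
 * MPTCP receive queue.
 */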
static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
		 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u32 map_remaining;
		size_t delta;
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk, msk);
		pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
		if (status == MAPPING_INVALID) {
			ssk->sk_err = EBADMSG;
			goto fatal;
		}
		if (status == MAPPING_DUMMY) {
			__mptcp_do_fallback(msk);
			skb = skb_peek(&ssk->sk_receive_queue);
			subflow->map_valid = 1;
			subflow->map_seq = READ_ONCE(msk->ack_seq);
			subflow->map_data_len = skb->len;
			subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
						   subflow->ssn_offset;
			return true;
		}

		if (status != MAPPING_OK)
			return false;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			return false;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map) {
				ssk->sk_err = EBADMSG;
				goto fatal;
			}
			WRITE_ONCE(msk->remote_key, subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (ack_seq == old_ack)
			break;

		/* only accept in-sequence mapping. Old values are spurious
		 * retransmissions; we can hit "future" values on active backup
		 * subflow switch, we rely on retransmissions to get
		 * in-sequence data.
		 * Concurrent subflows support will require subflow data
		 * reordering
		 */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);
		if (before64(ack_seq, old_ack))
			delta = min_t(size_t, old_ack - ack_seq, map_remaining);
		else
			delta = min_t(size_t, ack_seq - old_ack, map_remaining);

		/* discard mapped data */
		pr_debug("discarding %zu bytes, current map len=%d", delta,
			 map_remaining);
		if (delta) {
			read_descriptor_t desc = {
				.count = delta,
			};
			int ret;

			ret = tcp_read_sock(ssk, &desc, subflow_read_actor);
			if (ret < 0) {
				ssk->sk_err = -ret;
				goto fatal;
			}
			if (ret < delta)
				return false;
			if (delta == map_remaining)
				subflow->map_valid = 0;
		}
	}
	return true;

fatal:
	/* fatal protocol error, close the socket */
	/* This barrier is coupled with smp_rmb() in tcp_poll() */
	smp_wmb();
	ssk->sk_error_report(ssk);
	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	return false;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sk_buff *skb;

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		subflow->data_avail = 0;

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	if (!subflow_check_data_avail(sk)) {
		subflow->data_avail = 0;
		return false;
	}

	skb = skb_peek(&sk->sk_receive_queue);
	subflow->data_avail = skb &&
			      before(tcp_sk(sk)->copied_seq, TCP_SKB_CB(skb)->end_seq);
	return subflow->data_avail;
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as the mptcp peer is concerned that data is still inflight.
 * DSS ACK is updated when skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = tcp_space(sk);
	*full_space = tcp_full_space(sk);
}

static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	u16 state = 1 << inet_sk_state_load(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	msk = mptcp_sk(parent);
	if (state & TCPF_LISTEN) {
		set_bit(MPTCP_DATA_READY, &msk->flags);
		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join && !(state & TCPF_CLOSE));

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
}

static void subflow_write_space(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	sk_stream_write_space(sk);
	if (sk_stream_is_writeable(sk)) {
		set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
		smp_mb__after_atomic();
		/* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
		sk_stream_write_space(parent);
	}
}

static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
				struct sockaddr_storage *addr)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = info->family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		in_addr->sin_addr = info->addr;
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}

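/* Create and connect an additional (MP_JOIN) subflow towards 'remote',
 * optionally bound to 'loc'/'ifindex'. The new socket is a kernel socket
 * owned by the msk; once the connect is initiated it is appended to
 * msk->join_list under the join_list_lock.
 */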
int __mptcp_subflow_connect(struct sock *sk, int ifindex,
			    const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	int local_id = loc->id;
	struct socket *sf;
	struct sock *ssk;
	u32 remote_token;
	int addrlen;
	int err;

	if (!mptcp_is_fully_established(sk))
		return -ENOTCONN;

	err = mptcp_subflow_create_socket(sk, &sf);
	if (err)
		return err;

	ssk = sf->sk;
	subflow = mptcp_subflow_ctx(ssk);
	do {
		get_random_bytes(&subflow->local_nonce, sizeof(u32));
	} while (!subflow->local_nonce);

	if (!local_id) {
		err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
		if (err < 0)
			goto failed;

		local_id = err;
	}

	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (loc->family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	ssk->sk_bound_dev_if = ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u local_id=%d", msk, remote_token,
		 local_id);
	subflow->remote_token = remote_token;
	subflow->local_id = local_id;
	subflow->request_join = 1;
	subflow->request_bkup = 1;
	mptcp_info2sockaddr(remote, &addr);

	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed;

	spin_lock_bh(&msk->join_list_lock);
	list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);

	return err;

failed:
	sock_release(sf);
	return err;
}

int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	/* un-accepted server sockets can reach here - on bad configuration
	 * bail early to avoid greater trouble later
	 */
	if (unlikely(!sk->sk_socket))
		return -EINVAL;

	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
			       &sf);
	if (err)
		return err;

	lock_sock(sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 */
	sf->sk->sk_net_refcnt = 1;
	get_net(net);
#ifdef CONFIG_PROC_FS
	this_cpu_add(*net->core.sock_inuse, 1);
#endif
	err = tcp_set_ulp(sf->sk, "mptcp");
	release_sock(sf->sk);

	if (err) {
		sock_release(sf);
		return err;
	}

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * user.
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;

	return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							 gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	__subflow_state_change(sk);

	if (subflow_simultaneous_connect(sk)) {
		mptcp_do_fallback(sk);
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		pr_fallback(mptcp_sk(parent));
		subflow->conn_finished = 1;
		if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
			inet_sk_state_store(parent, TCP_ESTABLISHED);
			parent->sk_state_change(parent);
		}
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection
	 * a fin packet carrying a DSS can be unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);

	if (__mptcp_check_fallback(mptcp_sk(parent)) &&
	    !(parent->sk_shutdown & RCV_SHUTDOWN) &&
	    !subflow->rx_eof && subflow_is_done(sk)) {
		subflow->rx_eof = 1;
		mptcp_subflow_eof(parent);
	}
}

static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_data_ready = sk->sk_data_ready;
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_write_space = sk->sk_write_space;
	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
out:
	return err;
}

static void subflow_ulp_release(struct sock *sk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);

	if (!ctx)
		return;

	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

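/* ULP clone hook, called when the listener creates the child socket: copy
 * the handshake material gathered on the request socket into the new subflow
 * context, or fall back to plain TCP if the request carried no usable MPTCP
 * option.
 */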
static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_write_space = old_ctx->tcp_write_space;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->local_id = subflow_req->local_id;
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;
	}
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
	subflow_ops->slab_name = "request_sock_subflow";

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	subflow_ops->destructor = subflow_req_destructor;

	return 0;
}

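/* Register the MPTCP-specific request sock ops, the per-family af_ops
 * overrides (conn_request/syn_recv_sock/sk_rx_dst_set) and the "mptcp" ULP;
 * the mapped-v4-in-v6 variant reuses the IPv4 transmit helpers.
 */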
void __init mptcp_subflow_init(void)
{
	mptcp_subflow_request_sock_ops = tcp_request_sock_ops;
	if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}