// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
	tcp_request_sock_ops.destructor(req);
}

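/* HMAC the two 32-bit nonces with the given key pair; the result feeds the
 * MP_JOIN handshake (truncated hmac in the SYN-ACK, full hmac in the third ACK)
 */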
static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

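/* a new subflow is admitted only if the MPTCP connection is fully established
 * and the path manager currently accepts additional subflows
 */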
static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       READ_ONCE(msk->pm.accept_subflow);
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
						     const struct sk_buff *skb)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
	return msk;
}

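/* common MPTCP request_sock initialization, shared by the SYN and the
 * syncookie paths
 */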
static int __subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return -EINVAL;
#endif

	return 0;
}

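/* parse the MPTCP options carried by the incoming SYN and set up the request
 * socket for either an MP_CAPABLE or an MP_JOIN subflow
 */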
static void subflow_init_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	int ret;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

	ret = __subflow_init_req(req, sk_listener);
	if (ret)
		return;

	mptcp_get_options(skb, &mp_opt);

	if (mp_opt.mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (mp_opt.mp_join)
			return;
	} else if (mp_opt.mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (mp_opt.mp_capable && listener->request_mptcp) {
		int err, retries = 4;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
		do {
			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
		} while (subflow_req->local_key == 0);

		if (unlikely(req->syncookie)) {
			mptcp_crypto_key_sha(subflow_req->local_key,
					     &subflow_req->token,
					     &subflow_req->idsn);
			if (mptcp_token_exists(subflow_req->token)) {
				if (retries-- > 0)
					goto again;
			} else {
				subflow_req->mp_capable = 1;
			}
			return;
		}

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;
		else if (retries-- > 0)
			goto again;

	} else if (mp_opt.mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req, skb);

		if (unlikely(req->syncookie) && subflow_req->msk) {
			if (mptcp_can_accept_new_subflow(subflow_req->msk))
				subflow_init_req_cookie_join_save(subflow_req, skb);
		}

		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}
}

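/* as above, but for request sockets re-created from a TCP syncookie */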
int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	int err;

	err = __subflow_init_req(req, sk_listener);
	if (err)
		return err;

	mptcp_get_options(skb, &mp_opt);

	if (mp_opt.mp_capable && mp_opt.mp_join)
		return -EINVAL;

	if (mp_opt.mp_capable && listener->request_mptcp) {
		if (mp_opt.sndr_key == 0)
			return -EINVAL;

		subflow_req->local_key = mp_opt.rcvr_key;
		err = mptcp_token_new_request(req);
		if (err)
			return err;

		subflow_req->mp_capable = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	} else if (mp_opt.mp_join && listener->request_mptcp) {
		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
			return -EINVAL;

		if (mptcp_can_accept_new_subflow(subflow_req->msk))
			subflow_req->mp_join = 1;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

static void subflow_v4_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static void subflow_v6_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token,
		 (unsigned long long)thmac,
		 (unsigned long long)subflow->thmac);

	return thmac == subflow->thmac;
}

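/* tear down the given subflow with a TCP reset and let the msk worker perform
 * any remaining cleanup
 */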
void mptcp_subflow_reset(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	tcp_done(ssk);
	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
	    schedule_work(&mptcp_sk(sk)->work))
		sock_hold(sk);
}

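/* process the SYN-ACK of an actively opened subflow: complete the MP_CAPABLE
 * or MP_JOIN handshake, or fall back to plain TCP
 */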
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
		inet_sk_state_store(parent, TCP_ESTABLISHED);
		parent->sk_state_change(parent);
	}

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!mp_opt.mp_capable) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(mptcp_sk(subflow->conn));
			goto fallback;
		}

		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
		mptcp_finish_connect(sk);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!mp_opt.mp_join)
			goto do_reset;

		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
			 subflow->thmac, subflow->remote_nonce);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			goto do_reset;
		}

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
	}
	return;

do_reset:
	mptcp_subflow_reset(sk);
}

struct request_sock_ops mptcp_subflow_request_sock_ops;
EXPORT_SYMBOL_GPL(mptcp_subflow_request_sock_ops);
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
#endif

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void mptcp_sock_destruct(struct sock *sk)
{
	/* if new mptcp socket isn't accepted, it is free'd
	 * from the tcp listener sockets request queue, linked
	 * from req->sk. The tcp socket is released.
	 * This calls the ULP release function which will
	 * also remove the mptcp socket, via
	 * sock_put(ctx->conn).
	 *
	 * Problem is that the mptcp socket will be in
	 * ESTABLISHED state and will not have the SOCK_DEAD flag.
	 * Both result in warnings from inet_sock_destruct.
	 */

	if (sk->sk_state == TCP_ESTABLISHED) {
		sk->sk_state = TCP_CLOSE;
		WARN_ON_ONCE(sk->sk_socket);
		sock_orphan(sk);
	}

	mptcp_destroy_common(mptcp_sk(sk));
	inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;
}

static void subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	subflow_ulp_fallback(ssk, ctx);
	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	subflow->remote_key = mp_opt->sndr_key;
	subflow->fully_established = 1;
	subflow->can_ack = 1;
	WRITE_ONCE(msk->fully_established, true);
}

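/* create the child socket for a passively opened subflow; MP_CAPABLE requests
 * also clone a new msk here, while MP_JOIN requests attach the child to the
 * msk owning the token
 */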
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for 'mp_capable' even when options
	 * are not parsed
	 */
	mp_opt.mp_capable = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
			/* here we can receive and accept an in-window,
			 * out-of-order pkt, which will not carry the MP_CAPABLE
			 * opt even on mptcp enabled paths
			 */
			goto create_msk;
		}

		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_capable) {
			fallback = true;
			goto create_child;
		}

create_msk:
		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_join ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk) ||
		    !subflow_hmac_valid(req, &mp_opt)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fallback on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal)
				goto dispose_child;

			subflow_drop_ctx(child);
			goto out;
		}

		if (ctx->mp_capable) {
			/* this can't race with mptcp_close(), as the msk is
			 * not yet exposed to user-space
			 */
			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);

			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			new_msk->sk_destruct = mptcp_sock_destruct;
			mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
			ctx->conn = new_msk;
			new_msk = NULL;

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.mp_capable)
				mptcp_subflow_fully_established(ctx, &mp_opt);
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = subflow_req->msk;
			if (!owner)
				goto dispose_child;

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;
			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

out:
	/* dispose of the left over mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY
};

static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
{
	if ((u32)seq == (u32)old_seq)
		return old_seq;

	/* Assume map covers data not mapped yet. */
	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}

static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		  ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		warn_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
			     subflow->map_data_len))) {
		/* Mapping covers only past subflow data, invalid */
		warn_bad_map(subflow, ssn + skb->len);
		return false;
	}
	return true;
}

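/* inspect the DSS mapping (if any) carried by the skb at the head of the
 * subflow receive queue and update the stored mapping accordingly
 */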
static enum mapping_status get_mapping_status(struct sock *ssk,
					      struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0 len FIN pkts to the receive
			 * queue, those are the only 0 len pkts ever expected here,
			 * and we can admit no mapping only for 0 len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
		 mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
		 mpext->data_len, mpext->data_fin);

	data_len = mpext->data_len;
	if (data_len == 0) {
		pr_err("Infinite mapping not handled");
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
								 mpext->dsn64);
			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				if (updated && schedule_work(&msk->work))
					sock_hold((struct sock *)msk);

				return MAPPING_DATA_FIN;
			}
		} else {
			u64 data_fin_seq = mpext->data_seq + data_len - 1;

			/* If mpext->data_seq is a 32-bit value, data_fin_seq
			 * must also be limited to 32 bits.
			 */
			if (!mpext->dsn64)
				data_fin_seq &= GENMASK_ULL(31, 0);

			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
				 data_fin_seq, mpext->dsn64);
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	if (!mpext->dsn64) {
		map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
				     mpext->data_seq);
		pr_debug("expanded seq=%llu", subflow->map_seq);
	} else {
		map_seq = mpext->data_seq;
	}
	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			return MAPPING_OK;
		}

		/* If this skb data are fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		return MAPPING_OK;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->mpc_map = mpext->mpc_map;
	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len);

validate_seq:
	/* we revalidate valid mapping on new skb, because we must ensure
	 * the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb))
		return MAPPING_INVALID;

	skb_ext_del(skb, SKB_EXT_MPTCP);
	return MAPPING_OK;
}

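/* drop data already received at the MPTCP level, up to @limit bytes of the
 * skb at the head of the subflow receive queue
 */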
static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
				       u64 limit)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
	u32 incr;

	incr = limit >= skb->len ? skb->len + fin : limit;

	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
		 subflow->map_subflow_seq);
	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
	tcp_sk(ssk)->copied_seq += incr;
	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
		sk_eat_skb(ssk, skb);
	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
		subflow->map_valid = 0;
	if (incr)
		tcp_cleanup_rbuf(ssk, incr);
}

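/* check whether the subflow receive queue holds data the MPTCP-level socket
 * can consume, validating the DSS mappings along the way
 */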
static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
		 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
	if (!skb_peek(&ssk->sk_receive_queue))
		subflow->data_avail = 0;
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk, msk);
		pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
		if (status == MAPPING_INVALID) {
			ssk->sk_err = EBADMSG;
			goto fatal;
		}
		if (status == MAPPING_DUMMY) {
			__mptcp_do_fallback(msk);
			skb = skb_peek(&ssk->sk_receive_queue);
			subflow->map_valid = 1;
			subflow->map_seq = READ_ONCE(msk->ack_seq);
			subflow->map_data_len = skb->len;
			subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
						   subflow->ssn_offset;
			subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
			return true;
		}

		if (status != MAPPING_OK)
			return false;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			return false;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map) {
				ssk->sk_err = EBADMSG;
				goto fatal;
			}
			WRITE_ONCE(msk->remote_key, subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (ack_seq == old_ack) {
			subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
			break;
		} else if (after64(ack_seq, old_ack)) {
			subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
			break;
		}

		/* only accept in-sequence mapping. Old values are spurious
		 * retransmission
		 */
		mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
	}
	return true;

fatal:
	/* fatal protocol error, close the socket */
	/* This barrier is coupled with smp_rmb() in tcp_poll() */
	smp_wmb();
	ssk->sk_error_report(ssk);
	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	subflow->data_avail = 0;
	return false;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		subflow->data_avail = 0;

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	return subflow_check_data_avail(sk);
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as the mptcp peer is concerned that data is still inflight.
 * DSS ACK is updated when skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = tcp_space(sk);
	*full_space = tcp_full_space(sk);
}

static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	u16 state = 1 << inet_sk_state_load(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	msk = mptcp_sk(parent);
	if (state & TCPF_LISTEN) {
		set_bit(MPTCP_DATA_READY, &msk->flags);
		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join && !(state & TCPF_CLOSE));

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
}

static void subflow_write_space(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	if (!sk_stream_is_writeable(sk))
		return;

	if (sk_stream_is_writeable(parent)) {
		set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
		smp_mb__after_atomic();
		/* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
		sk_stream_write_space(parent);
	}
}

static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
				struct sockaddr_storage *addr)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = info->family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		in_addr->sin_addr = info->addr;
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}

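/* create and connect a new outgoing subflow for the given msk, bound to the
 * local address @loc and joining the remote address @remote
 */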
int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	int remote_id = remote->id;
	int local_id = loc->id;
	struct socket *sf;
	struct sock *ssk;
	u32 remote_token;
	int addrlen;
	int err;

	if (!mptcp_is_fully_established(sk))
		return -ENOTCONN;

	err = mptcp_subflow_create_socket(sk, &sf);
	if (err)
		return err;

	ssk = sf->sk;
	subflow = mptcp_subflow_ctx(ssk);
	do {
		get_random_bytes(&subflow->local_nonce, sizeof(u32));
	} while (!subflow->local_nonce);

	if (!local_id) {
		err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
		if (err < 0)
			goto failed;

		local_id = err;
	}

	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (loc->family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	ssk->sk_bound_dev_if = loc->ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
		 remote_token, local_id, remote_id);
	subflow->remote_token = remote_token;
	subflow->local_id = local_id;
	subflow->remote_id = remote_id;
	subflow->request_join = 1;
	subflow->request_bkup = !!(loc->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
	mptcp_info2sockaddr(remote, &addr);

	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed;

	spin_lock_bh(&msk->join_list_lock);
	list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);

	return err;

failed:
	sock_release(sf);
	return err;
}

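/* allocate a kernel TCP socket for a new subflow and attach the "mptcp" ULP
 * to it
 */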
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	/* un-accepted server sockets can reach here - on bad configuration
	 * bail early to avoid greater trouble later
	 */
	if (unlikely(!sk->sk_socket))
		return -EINVAL;

	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
			       &sf);
	if (err)
		return err;

	lock_sock(sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 */
	sf->sk->sk_net_refcnt = 1;
	get_net(net);
#ifdef CONFIG_PROC_FS
	this_cpu_add(*net->core.sock_inuse, 1);
#endif
	err = tcp_set_ulp(sf->sk, "mptcp");
	release_sock(sf->sk);

	if (err) {
		sock_release(sf);
		return err;
	}

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * user.
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;

	return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							 gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	__subflow_state_change(sk);

	if (subflow_simultaneous_connect(sk)) {
		mptcp_do_fallback(sk);
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		pr_fallback(mptcp_sk(parent));
		subflow->conn_finished = 1;
		if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
			inet_sk_state_store(parent, TCP_ESTABLISHED);
			parent->sk_state_change(parent);
		}
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection
	 * a fin packet carrying a DSS can be unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);

	if (__mptcp_check_fallback(mptcp_sk(parent)) &&
	    !(parent->sk_shutdown & RCV_SHUTDOWN) &&
	    !subflow->rx_eof && subflow_is_done(sk)) {
		subflow->rx_eof = 1;
		mptcp_subflow_eof(parent);
	}
}

static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_data_ready = sk->sk_data_ready;
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_write_space = sk->sk_write_space;
	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
out:
	return err;
}

static void subflow_ulp_release(struct sock *sk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);

	if (!ctx)
		return;

	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

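/* inherit the ULP context from the listener subflow to the newly accepted
 * child, or fall back to plain TCP when the request carries no MPTCP state
 */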
static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_write_space = old_ctx->tcp_write_space;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->local_id = subflow_req->local_id;
		new_ctx->remote_id = subflow_req->remote_id;
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;
	}
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name = "mptcp",
	.owner = THIS_MODULE,
	.init = subflow_ulp_init,
	.release = subflow_ulp_release,
	.clone = subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
	subflow_ops->slab_name = "request_sock_subflow";

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	subflow_ops->destructor = subflow_req_destructor;

	return 0;
}

void __init mptcp_subflow_init(void)
{
	mptcp_subflow_request_sock_ops = tcp_request_sock_ops;
	if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}