// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha2.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

#include <trace/events/mptcp.h>

static void mptcp_subflow_ops_undo_override(struct sock *ssk);

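/* bump the given MPTCP MIB counter in the netns owning the request socket */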
static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
	tcp_request_sock_ops.destructor(req);
}

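/* compute the RFC 8684 MP_JOIN HMAC over the two nonces, keyed with the
 * given key pair; callers consume a (possibly truncated) part of the
 * SHA256_DIGEST_SIZE bytes written to 'hmac'
 */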
static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       READ_ONCE(msk->pm.accept_subflow);
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
{
	struct mptcp_sock *msk = subflow_req->msk;
	u8 hmac[SHA256_DIGEST_SIZE];

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
}

static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	return msk;
}

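/* common request_sock init, shared by the plain and syncookie paths */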
static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
	subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);
}

static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
}

static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
{
	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);

	if (mpext) {
		memset(mpext, 0, sizeof(*mpext));
		mpext->reset_reason = reason;
	}
}

/* Init mptcp request socket.
 *
 * Returns an error code if a JOIN has failed and a TCP reset
 * should be sent.
 */
static int subflow_check_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return -EINVAL;
#endif

	mptcp_get_options(sk_listener, skb, &mp_opt);

	if (mp_opt.mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (mp_opt.mp_join)
			return 0;
	} else if (mp_opt.mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (mp_opt.mp_capable && listener->request_mptcp) {
		int err, retries = MPTCP_TOKEN_MAX_RETRIES;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
		do {
			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
		} while (subflow_req->local_key == 0);

		if (unlikely(req->syncookie)) {
			mptcp_crypto_key_sha(subflow_req->local_key,
					     &subflow_req->token,
					     &subflow_req->idsn);
			if (mptcp_token_exists(subflow_req->token)) {
				if (retries-- > 0)
					goto again;
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
			} else {
				subflow_req->mp_capable = 1;
			}
			return 0;
		}

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;
		else if (retries-- > 0)
			goto again;
		else
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);

	} else if (mp_opt.mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req);

		/* Can't fall back to TCP in this case. */
		if (!subflow_req->msk) {
			subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
			return -EPERM;
		}

		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
			pr_debug("syn inet_sport=%d %d",
				 ntohs(inet_sk(sk_listener)->inet_sport),
				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
				return -EPERM;
			}
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
		}

		subflow_req_create_thmac(subflow_req);

		if (unlikely(req->syncookie)) {
			if (mptcp_can_accept_new_subflow(subflow_req->msk))
				subflow_init_req_cookie_join_save(subflow_req, skb);
			else
				return -EPERM;
		}

		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}

	return 0;
}

int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	int err;

	subflow_init_req(req, sk_listener);
	mptcp_get_options(sk_listener, skb, &mp_opt);

	if (mp_opt.mp_capable && mp_opt.mp_join)
		return -EINVAL;

	if (mp_opt.mp_capable && listener->request_mptcp) {
		if (mp_opt.sndr_key == 0)
			return -EINVAL;

		subflow_req->local_key = mp_opt.rcvr_key;
		err = mptcp_token_new_request(req);
		if (err)
			return err;

		subflow_req->mp_capable = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	} else if (mp_opt.mp_join && listener->request_mptcp) {
		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
			return -EINVAL;

		subflow_req->mp_join = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp_request_sock_ops.send_reset(sk, skb);
	return NULL;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp6_request_sock_ops.send_reset(sk, skb);
	return NULL;
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token,
		 (unsigned long long)thmac,
		 (unsigned long long)subflow->thmac);

	return thmac == subflow->thmac;
}

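/* actively reset the given subflow and, when needed, schedule the msk
 * worker for the remaining msk-level cleanup
 */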
void mptcp_subflow_reset(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	/* must hold: tcp_done() could drop last reference on parent */
	sock_hold(sk);

	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	tcp_done(ssk);
	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
	    schedule_work(&mptcp_sk(sk)->work))
		return; /* worker will put sk for us */

	sock_put(sk);
}

static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}

void __mptcp_set_connected(struct sock *sk)
{
	if (sk->sk_state == TCP_SYN_SENT) {
		inet_sk_state_store(sk, TCP_ESTABLISHED);
		sk->sk_state_change(sk);
	}
}

static void mptcp_set_connected(struct sock *sk)
{
	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_set_connected(sk);
	else
		set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags);
	mptcp_data_unlock(sk);
}

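/* process the SYN-ACK of an actively opened subflow: complete the
 * MP_CAPABLE or MP_JOIN handshake, falling back to plain TCP or resetting
 * the subflow when the expected options are missing or invalid
 */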
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	mptcp_propagate_sndbuf(parent, sk);
	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(sk, skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!mp_opt.mp_capable) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(mptcp_sk(subflow->conn));
			goto fallback;
		}

		if (mp_opt.csum_reqd)
			WRITE_ONCE(mptcp_sk(parent)->csum_enabled, true);
		if (mp_opt.deny_join_id0)
			WRITE_ONCE(mptcp_sk(parent)->pm.remote_deny_join_id0, true);
		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
		mptcp_finish_connect(sk);
		mptcp_set_connected(parent);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!mp_opt.mp_join) {
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		subflow->backup = mp_opt.backup;
		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
			 subflow, subflow->thmac, subflow->remote_nonce,
			 subflow->backup);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);

		if (subflow_use_different_dport(mptcp_sk(parent), sk)) {
			pr_debug("synack inet_dport=%d %d",
				 ntohs(inet_sk(sk)->inet_dport),
				 ntohs(inet_sk(parent)->inet_dport));
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
		}
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		mptcp_set_connected(parent);
	}
	return;

do_reset:
	subflow->reset_transient = 0;
	mptcp_subflow_reset(sk);
}

struct request_sock_ops mptcp_subflow_request_sock_ops;
EXPORT_SYMBOL_GPL(mptcp_subflow_request_sock_ops);
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;
static struct proto tcpv6_prot_override;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
#endif

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void mptcp_sock_destruct(struct sock *sk)
{
	/* if the new mptcp socket isn't accepted, it is freed
	 * from the tcp listener sockets request queue, linked
	 * from req->sk. The tcp socket is released.
	 * This calls the ULP release function which will
	 * also remove the mptcp socket, via
	 * sock_put(ctx->conn).
	 *
	 * Problem is that the mptcp socket will be in
	 * ESTABLISHED state and will not have the SOCK_DEAD flag.
	 * Both result in warnings from inet_sock_destruct.
	 */
	if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		sk->sk_state = TCP_CLOSE;
		WARN_ON_ONCE(sk->sk_socket);
		sock_orphan(sk);
	}

	mptcp_destroy_common(mptcp_sk(sk));
	inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
	/* the msk is not yet exposed to user-space */
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;

	mptcp_subflow_ops_undo_override(sk);
}

static void subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	subflow_ulp_fallback(ssk, ctx);
	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	subflow->remote_key = mp_opt->sndr_key;
	subflow->fully_established = 1;
	subflow->can_ack = 1;
	WRITE_ONCE(msk->fully_established, true);
}

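/* handle the third ACK on a passive socket: create the child subflow and,
 * on MP_CAPABLE connections, the new msk; on failure either fall back to
 * plain TCP or, for fatal MP_JOIN errors, reset the child
 */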
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for 'mp_capable' even when options
	 * are not parsed
	 */
	mp_opt.mp_capable = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		/* we can receive and accept an in-window, out-of-order pkt,
		 * which may not carry the MP_CAPABLE opt even on mptcp enabled
		 * paths: always try to extract the peer key, and fallback
		 * for packets missing it.
		 * Even OoO DSS packets coming legitimately after dropped or
		 * reordered MPC will cause fallback, but we don't have other
		 * options.
		 */
		mptcp_get_options(sk, skb, &mp_opt);
		if (!mp_opt.mp_capable) {
			fallback = true;
			goto create_child;
		}

		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		mptcp_get_options(sk, skb, &mp_opt);
		if (!mp_opt.mp_join || !subflow_hmac_valid(req, &mp_opt) ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fallback on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal) {
				subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
				goto dispose_child;
			}

			subflow_drop_ctx(child);
			goto out;
		}

		/* ssk inherits options of listener sk */
		ctx->setsockopt_seq = listener->setsockopt_seq;

		if (ctx->mp_capable) {
			/* this can't race with mptcp_close(), as the msk is
			 * not yet exposed to user-space
			 */
			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);

			/* record the newly created socket as the first msk
			 * subflow, but don't link it yet into conn_list
			 */
			WRITE_ONCE(mptcp_sk(new_msk)->first, child);

			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			new_msk->sk_destruct = mptcp_sock_destruct;
			mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
			mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
			ctx->conn = new_msk;
			new_msk = NULL;

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.mp_capable)
				mptcp_subflow_fully_established(ctx, &mp_opt);
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = subflow_req->msk;
			if (!owner) {
				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
				goto dispose_child;
			}

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;

			if (subflow_use_different_sport(owner, sk)) {
				pr_debug("ack inet_sport=%d %d",
					 ntohs(inet_sk(sk)->inet_sport),
					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
					SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
					goto dispose_child;
				}
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
			}

			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

out:
	/* dispose of the left over mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific;
static struct proto tcp_prot_override;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY
};

static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

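/* true if the unread part of this skb is entirely covered by the data
 * still accounted to the current DSS mapping
 */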
static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

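/* ensure the current subflow sequence number falls inside the interval
 * described by the current DSS mapping
 */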
static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping covers only past subflow data: invalid */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	return true;
}

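/* validate the DSS checksum (see RFC 8684, Section 3.3) for the current
 * mapping, accumulating the checksum incrementally across the receive
 * queue so it is computed only once
 */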
static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
					      bool csum_reqd)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct csum_pseudo_header header;
	u32 offset, seq, delta;
	__wsum csum;
	int len;

	if (!csum_reqd)
		return MAPPING_OK;

	/* mapping already validated on previous traversal */
	if (subflow->map_csum_len == subflow->map_data_len)
		return MAPPING_OK;

	/* traverse the receive queue, ensuring it contains a full
	 * DSS mapping and accumulating the related csum.
	 * Preserve the accumulated csum across multiple calls, to compute
	 * the csum only once
	 */
	delta = subflow->map_data_len - subflow->map_csum_len;
	for (;;) {
		seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
		offset = seq - TCP_SKB_CB(skb)->seq;

		/* if the current skb has not been accounted yet, csum its contents
		 * up to the amount covered by the current DSS
		 */
		if (offset < skb->len) {
			__wsum csum;

			len = min(skb->len - offset, delta);
			csum = skb_checksum(skb, offset, len, 0);
			subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
								subflow->map_csum_len);

			delta -= len;
			subflow->map_csum_len += len;
		}
		if (delta == 0)
			break;

		if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
			/* if this subflow is closed, the partial mapping
			 * will be never completed; flush the pending skbs, so
			 * that subflow_sched_work_if_closed() can kick in
			 */
			if (unlikely(ssk->sk_state == TCP_CLOSE))
				while ((skb = skb_peek(&ssk->sk_receive_queue)))
					sk_eat_skb(ssk, skb);

			/* not enough data to validate the csum */
			return MAPPING_EMPTY;
		}

		/* the DSS mapping for next skbs will be validated later,
		 * when a get_mapping_status call will process such skb
		 */
		skb = skb->next;
	}

	/* note that 'map_data_len' accounts only for the carried data, does
	 * not include the eventual seq increment due to the data fin,
	 * while the pseudo header requires the original DSS data len,
	 * including that
	 */
	header.data_seq = cpu_to_be64(subflow->map_seq);
	header.subflow_seq = htonl(subflow->map_subflow_seq);
	header.data_len = htons(subflow->map_data_len + subflow->map_data_fin);
	header.csum = 0;

	csum = csum_partial(&header, sizeof(header), subflow->map_data_csum);
	if (unlikely(csum_fold(csum))) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
		subflow->send_mp_fail = 1;
		return subflow->mp_join ? MAPPING_INVALID : MAPPING_DUMMY;
	}

	return MAPPING_OK;
}

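/* parse the DSS option carried by the skb at the head of the receive
 * queue and update the subflow mapping state accordingly
 */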
static enum mapping_status get_mapping_status(struct sock *ssk,
					      struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool csum_reqd = READ_ONCE(msk->csum_enabled);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0 len FIN pkt to the receive
			 * queue, those are the only 0len pkts ever expected here,
			 * and we can admit no mapping only for 0 len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	trace_get_mapping_status(mpext);

	data_len = mpext->data_len;
	if (data_len == 0) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
								 mpext->dsn64);
			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				if (updated && schedule_work(&msk->work))
					sock_hold((struct sock *)msk);

				return MAPPING_DATA_FIN;
			}
		} else {
			u64 data_fin_seq = mpext->data_seq + data_len - 1;

			/* If mpext->data_seq is a 32-bit value, data_fin_seq
			 * must also be limited to 32 bits.
			 */
			if (!mpext->dsn64)
				data_fin_seq &= GENMASK_ULL(31, 0);

			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
				 data_fin_seq, mpext->dsn64);
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len &&
		    subflow->map_csum_reqd == mpext->csum_reqd) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			goto validate_csum;
		}

		/* If this skb data are fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		goto validate_csum;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->map_data_fin = mpext->data_fin;
	subflow->mpc_map = mpext->mpc_map;
	subflow->map_csum_reqd = mpext->csum_reqd;
	subflow->map_csum_len = 0;
	subflow->map_data_csum = csum_unfold(mpext->csum);

	/* Cf. RFC 8684 Section 3.3.0 */
	if (unlikely(subflow->map_csum_reqd != csum_reqd))
		return MAPPING_INVALID;

	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len, subflow->map_csum_reqd,
		 subflow->map_data_csum);

validate_seq:
	/* we revalidate valid mapping on new skb, because we must ensure
	 * the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
		return MAPPING_INVALID;
	}

	skb_ext_del(skb, SKB_EXT_MPTCP);

validate_csum:
	return validate_data_csum(ssk, skb, csum_reqd);
}

static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
				       u64 limit)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
	u32 incr;

	incr = limit >= skb->len ? skb->len + fin : limit;

	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
		 subflow->map_subflow_seq);
	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
	tcp_sk(ssk)->copied_seq += incr;
	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
		sk_eat_skb(ssk, skb);
	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
		subflow->map_valid = 0;
}

/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;

	if (likely(ssk->sk_state != TCP_CLOSE))
		return;

	if (skb_queue_empty(&ssk->sk_receive_queue) &&
	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
		sock_hold(sk);
		if (!schedule_work(&msk->work))
			sock_put(sk);
	}
}

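/* return true when in-sequence data is ready on this subflow, validating
 * or discarding mappings as needed and handling fallback or reset on
 * unrecoverable mapping errors
 */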
static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	if (!skb_peek(&ssk->sk_receive_queue))
		WRITE_ONCE(subflow->data_avail, 0);
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk, msk);
		trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
		if (unlikely(status == MAPPING_INVALID))
			goto fallback;

		if (unlikely(status == MAPPING_DUMMY))
			goto fallback;

		if (status != MAPPING_OK)
			goto no_data;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			goto no_data;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map)
				goto fallback;
			WRITE_ONCE(msk->remote_key, subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (unlikely(before64(ack_seq, old_ack))) {
			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
			continue;
		}

		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
		break;
	}
	return true;

no_data:
	subflow_sched_work_if_closed(msk, ssk);
	return false;

fallback:
	/* RFC 8684 section 3.7. */
	if (subflow->send_mp_fail) {
		if (mptcp_has_another_subflow(ssk)) {
			while ((skb = skb_peek(&ssk->sk_receive_queue)))
				sk_eat_skb(ssk, skb);
		}
		ssk->sk_err = EBADMSG;
		tcp_set_state(ssk, TCP_CLOSE);
		subflow->reset_transient = 0;
		subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
		tcp_send_active_reset(ssk, GFP_ATOMIC);
		WRITE_ONCE(subflow->data_avail, 0);
		return true;
	}

	if (subflow->mp_join || subflow->fully_established) {
		/* fatal protocol error, close the socket.
		 * subflow_error_report() will introduce the appropriate barriers
		 */
		ssk->sk_err = EBADMSG;
		tcp_set_state(ssk, TCP_CLOSE);
		subflow->reset_transient = 0;
		subflow->reset_reason = MPTCP_RST_EMPTCP;
		tcp_send_active_reset(ssk, GFP_ATOMIC);
		WRITE_ONCE(subflow->data_avail, 0);
		return false;
	}

	__mptcp_do_fallback(msk);
	skb = skb_peek(&ssk->sk_receive_queue);
	subflow->map_valid = 1;
	subflow->map_seq = READ_ONCE(msk->ack_seq);
	subflow->map_data_len = skb->len;
	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
	WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
	return true;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		WRITE_ONCE(subflow->data_avail, 0);

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	return subflow_check_data_avail(sk);
}

Florian Westphal071c8ed2020-04-24 12:31:50 +02001216/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
1217 * not the ssk one.
1218 *
1219 * In mptcp, rwin is about the mptcp-level connection data.
1220 *
1221 * Data that is still on the ssk rx queue can thus be ignored,
Bhaskar Chowdhury55320b82021-03-27 04:42:46 +05301222 * as far as mptcp peer is concerned that data is still inflight.
Florian Westphal071c8ed2020-04-24 12:31:50 +02001223 * DSS ACK is updated when skb is moved to the mptcp rx queue.
1224 */
1225void mptcp_space(const struct sock *ssk, int *space, int *full_space)
1226{
1227 const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1228 const struct sock *sk = subflow->conn;
1229
Paolo Abeniea4ca582020-11-19 11:46:03 -08001230 *space = __mptcp_space(sk);
Florian Westphal071c8ed2020-04-24 12:31:50 +02001231 *full_space = tcp_full_space(sk);
1232}
1233
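/* Usage sketch (illustrative): sampling the MPTCP-level receive space for
 * a subflow. Per the comment above, both outputs describe the parent msk,
 * not the ssk; the wrapper below is hypothetical.
 */
static int example_msk_space_left(const struct sock *ssk)
{
	int space, full_space;

	mptcp_space(ssk, &space, &full_space);
	/* never advertise more than the msk-level budget */
	return min(space, full_space);
}
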
Paolo Abeni15cc1042021-02-11 15:30:37 -08001234void __mptcp_error_report(struct sock *sk)
1235{
1236 struct mptcp_subflow_context *subflow;
1237 struct mptcp_sock *msk = mptcp_sk(sk);
1238
1239 mptcp_for_each_subflow(msk, subflow) {
1240 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1241 int err = sock_error(ssk);
1242
1243 if (!err)
1244 continue;
1245
1246 /* only propagate errors on fallen-back sockets or
1247 * on MPC connect
1248 */
1249 if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
1250 continue;
1251
1252 inet_sk_state_store(sk, inet_sk_state_load(ssk));
1253 sk->sk_err = -err;
1254
1255 /* This barrier is coupled with smp_rmb() in mptcp_poll() */
1256 smp_wmb();
Alexander Aringe3ae2362021-06-27 18:48:21 -04001257 sk_error_report(sk);
Paolo Abeni15cc1042021-02-11 15:30:37 -08001258 break;
1259 }
1260}
1261
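/* Reader-side sketch: the smp_wmb() above pairs with an smp_rmb() issued
 * before testing sk->sk_err, so the error is observed only after the
 * updated socket state. The real pairing lives in mptcp_poll(); this is an
 * illustrative reduction of it.
 */
static __poll_t example_poll_err(const struct sock *sk)
{
	__poll_t mask = 0;

	smp_rmb();	/* paired with smp_wmb() in __mptcp_error_report() */
	if (sk->sk_err)
		mask |= EPOLLERR;
	return mask;
}
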
1262static void subflow_error_report(struct sock *ssk)
1263{
1264 struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1265
1266 mptcp_data_lock(sk);
1267 if (!sock_owned_by_user(sk))
1268 __mptcp_error_report(sk);
1269 else
1270 set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags);
1271 mptcp_data_unlock(sk);
1272}
1273
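/* Sketch of the deferred branch above: when the msk is owned by user
 * context, the MPTCP_ERROR_REPORT bit is consumed once the lock owner
 * releases the socket, roughly as below (the real consumer is the msk
 * release callback in protocol.c).
 */
static void example_consume_deferred_error(struct sock *sk)
{
	if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
		__mptcp_error_report(sk);
}
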
Paolo Abeni499ada52021-06-10 15:59:44 -07001274static void subflow_data_ready(struct sock *sk)
1275{
1276 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1277 u16 state = 1 << inet_sk_state_load(sk);
1278 struct sock *parent = subflow->conn;
1279 struct mptcp_sock *msk;
1280
1281 msk = mptcp_sk(parent);
1282 if (state & TCPF_LISTEN) {
1283		/* MPJ subflows are removed from the accept queue before reaching here,
1284 * avoid stray wakeups
1285 */
1286 if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
1287 return;
1288
1289 set_bit(MPTCP_DATA_READY, &msk->flags);
1290 parent->sk_data_ready(parent);
1291 return;
1292 }
1293
1294 WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
1295 !subflow->mp_join && !(state & TCPF_CLOSE));
1296
1297 if (mptcp_subflow_data_available(sk))
1298 mptcp_data_ready(parent, sk);
1299 else if (unlikely(sk->sk_err))
1300 subflow_error_report(sk);
1301}
1302
1303static void subflow_write_space(struct sock *ssk)
1304{
1305 struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1306
1307 mptcp_propagate_sndbuf(sk, ssk);
1308 mptcp_write_space(sk);
1309}
1310
Peter Krystadcec37a62020-01-21 16:56:18 -08001311static struct inet_connection_sock_af_ops *
1312subflow_default_af_ops(struct sock *sk)
1313{
1314#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1315 if (sk->sk_family == AF_INET6)
1316 return &subflow_v6_specific;
1317#endif
1318 return &subflow_specific;
1319}
1320
Peter Krystadcec37a62020-01-21 16:56:18 -08001321#if IS_ENABLED(CONFIG_MPTCP_IPV6)
Geert Uytterhoeven31484d52020-01-30 10:45:26 +01001322void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
1323{
Peter Krystadcec37a62020-01-21 16:56:18 -08001324 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1325 struct inet_connection_sock *icsk = inet_csk(sk);
1326 struct inet_connection_sock_af_ops *target;
1327
1328 target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
1329
1330 pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
Mat Martineauedc7e482020-01-24 16:04:03 -08001331 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
Peter Krystadcec37a62020-01-21 16:56:18 -08001332
1333 if (likely(icsk->icsk_af_ops == target))
1334 return;
1335
1336 subflow->icsk_af_ops = icsk->icsk_af_ops;
1337 icsk->icsk_af_ops = target;
Peter Krystadcec37a62020-01-21 16:56:18 -08001338}
Geert Uytterhoeven31484d52020-01-30 10:45:26 +01001339#endif
Peter Krystadcec37a62020-01-21 16:56:18 -08001340
Geliang Tang1729cf12021-02-01 15:09:12 -08001341void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
1342 struct sockaddr_storage *addr,
1343 unsigned short family)
Peter Krystadec3edaa2020-03-27 14:48:40 -07001344{
1345 memset(addr, 0, sizeof(*addr));
Matthieu Baerts50a13bc2021-01-25 10:59:00 -08001346 addr->ss_family = family;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001347 if (addr->ss_family == AF_INET) {
1348 struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;
1349
Matthieu Baerts50a13bc2021-01-25 10:59:00 -08001350 if (info->family == AF_INET)
1351 in_addr->sin_addr = info->addr;
1352#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1353 else if (ipv6_addr_v4mapped(&info->addr6))
1354 in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
1355#endif
Peter Krystadec3edaa2020-03-27 14:48:40 -07001356 in_addr->sin_port = info->port;
1357 }
1358#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1359 else if (addr->ss_family == AF_INET6) {
1360 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;
1361
Matthieu Baerts50a13bc2021-01-25 10:59:00 -08001362 if (info->family == AF_INET)
1363 ipv6_addr_set_v4mapped(info->addr.s_addr,
1364 &in6_addr->sin6_addr);
1365 else
1366 in6_addr->sin6_addr = info->addr6;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001367 in6_addr->sin6_port = info->port;
1368 }
1369#endif
1370}
1371
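/* Usage sketch (illustrative): converting an mptcp_addr_info into a
 * sockaddr before binding a subflow socket, mirroring what
 * __mptcp_subflow_connect() does below; the wrapper name is hypothetical.
 */
static int example_bind_subflow(struct socket *sf,
				const struct mptcp_addr_info *loc)
{
	struct sockaddr_storage addr;
	int addrlen = sizeof(struct sockaddr_in);

	mptcp_info2sockaddr(loc, &addr, sf->sk->sk_family);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (addr.ss_family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	return kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
}
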
Paolo Abenief0da3b2020-09-14 10:01:15 +02001372int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
Geliang Tangee285252021-08-17 15:07:22 -07001373 const struct mptcp_addr_info *remote)
Peter Krystadec3edaa2020-03-27 14:48:40 -07001374{
1375 struct mptcp_sock *msk = mptcp_sk(sk);
1376 struct mptcp_subflow_context *subflow;
1377 struct sockaddr_storage addr;
Geliang Tang2ff0e562020-09-08 10:49:39 +08001378 int remote_id = remote->id;
Paolo Abeni6bad9122020-06-30 16:38:26 +02001379 int local_id = loc->id;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001380 struct socket *sf;
Paolo Abeni6bad9122020-06-30 16:38:26 +02001381 struct sock *ssk;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001382 u32 remote_token;
1383 int addrlen;
Geliang Tangee285252021-08-17 15:07:22 -07001384 int ifindex;
1385 u8 flags;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001386 int err;
1387
Paolo Abenib93df082020-07-23 13:02:32 +02001388 if (!mptcp_is_fully_established(sk))
Peter Krystadec3edaa2020-03-27 14:48:40 -07001389 return -ENOTCONN;
1390
1391 err = mptcp_subflow_create_socket(sk, &sf);
1392 if (err)
1393 return err;
1394
Paolo Abeni6bad9122020-06-30 16:38:26 +02001395 ssk = sf->sk;
1396 subflow = mptcp_subflow_ctx(ssk);
1397 do {
1398 get_random_bytes(&subflow->local_nonce, sizeof(u32));
1399 } while (!subflow->local_nonce);
1400
1401 if (!local_id) {
1402 err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
1403 if (err < 0)
1404 goto failed;
1405
1406 local_id = err;
1407 }
1408
Geliang Tangee285252021-08-17 15:07:22 -07001409 mptcp_pm_get_flags_and_ifindex_by_id(sock_net(sk), local_id,
1410 &flags, &ifindex);
Peter Krystadec3edaa2020-03-27 14:48:40 -07001411 subflow->remote_key = msk->remote_key;
1412 subflow->local_key = msk->local_key;
1413 subflow->token = msk->token;
Matthieu Baerts50a13bc2021-01-25 10:59:00 -08001414 mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
Peter Krystadec3edaa2020-03-27 14:48:40 -07001415
1416 addrlen = sizeof(struct sockaddr_in);
1417#if IS_ENABLED(CONFIG_MPTCP_IPV6)
Matthieu Baerts50a13bc2021-01-25 10:59:00 -08001418 if (addr.ss_family == AF_INET6)
Peter Krystadec3edaa2020-03-27 14:48:40 -07001419 addrlen = sizeof(struct sockaddr_in6);
1420#endif
Geliang Tangdaa83ab2021-04-06 17:15:57 -07001421 ssk->sk_bound_dev_if = ifindex;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001422 err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
1423 if (err)
1424 goto failed;
1425
1426 mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
Geliang Tang2ff0e562020-09-08 10:49:39 +08001427 pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
1428 remote_token, local_id, remote_id);
Peter Krystadec3edaa2020-03-27 14:48:40 -07001429 subflow->remote_token = remote_token;
Paolo Abeni6bad9122020-06-30 16:38:26 +02001430 subflow->local_id = local_id;
Geliang Tang2ff0e562020-09-08 10:49:39 +08001431 subflow->remote_id = remote_id;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001432 subflow->request_join = 1;
Geliang Tangdaa83ab2021-04-06 17:15:57 -07001433 subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
Matthieu Baerts50a13bc2021-01-25 10:59:00 -08001434 mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
Peter Krystadec3edaa2020-03-27 14:48:40 -07001435
Paolo Abeni5b950ff2020-12-09 12:03:29 +01001436 mptcp_add_pending_subflow(msk, subflow);
Florian Westphal78962482021-04-15 16:44:53 -07001437 mptcp_sockopt_sync(msk, ssk);
Peter Krystadec3edaa2020-03-27 14:48:40 -07001438 err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
1439 if (err && err != -EINPROGRESS)
Paolo Abeni5b950ff2020-12-09 12:03:29 +01001440 goto failed_unlink;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001441
Paolo Abeni866f26f2021-01-20 15:39:10 +01001442 /* discard the subflow socket */
1443 mptcp_sock_graft(ssk, sk->sk_socket);
1444 iput(SOCK_INODE(sf));
Peter Krystadec3edaa2020-03-27 14:48:40 -07001445 return err;
1446
Paolo Abeni5b950ff2020-12-09 12:03:29 +01001447failed_unlink:
Peter Krystadec3edaa2020-03-27 14:48:40 -07001448 spin_lock_bh(&msk->join_list_lock);
Paolo Abeni5b950ff2020-12-09 12:03:29 +01001449 list_del(&subflow->node);
Peter Krystadec3edaa2020-03-27 14:48:40 -07001450 spin_unlock_bh(&msk->join_list_lock);
Florian Westphalf0715772021-03-04 13:32:09 -08001451 sock_put(mptcp_subflow_tcp_sock(subflow));
Peter Krystadec3edaa2020-03-27 14:48:40 -07001452
Peter Krystadec3edaa2020-03-27 14:48:40 -07001453failed:
Paolo Abenie16163b2020-11-16 10:48:09 +01001454 subflow->disposable = 1;
Peter Krystadec3edaa2020-03-27 14:48:40 -07001455 sock_release(sf);
1456 return err;
1457}
1458
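/* Caller sketch (illustrative): the in-kernel path manager invokes
 * __mptcp_subflow_connect() with the msk socket lock held; error handling
 * is elided and the wrapper below is hypothetical.
 */
static void example_pm_open_subflow(struct mptcp_sock *msk,
				    const struct mptcp_addr_info *local,
				    const struct mptcp_addr_info *remote)
{
	struct sock *sk = (struct sock *)msk;

	lock_sock(sk);
	__mptcp_subflow_connect(sk, local, remote);
	release_sock(sk);
}
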
Nicolas Rybowski3764b0c2020-12-10 14:24:58 -08001459static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
1460{
1461#ifdef CONFIG_SOCK_CGROUP_DATA
1462 struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
1463 *child_skcd = &child->sk_cgrp_data;
1464
1465 /* only the additional subflows created by kworkers have to be modified */
1466 if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
1467 cgroup_id(sock_cgroup_ptr(child_skcd))) {
1468#ifdef CONFIG_MEMCG
1469 struct mem_cgroup *memcg = parent->sk_memcg;
1470
1471 mem_cgroup_sk_free(child);
1472 if (memcg && css_tryget(&memcg->css))
1473 child->sk_memcg = memcg;
1474#endif /* CONFIG_MEMCG */
1475
1476 cgroup_sk_free(child_skcd);
1477 *child_skcd = *parent_skcd;
1478 cgroup_sk_clone(child_skcd);
1479 }
1480#endif /* CONFIG_SOCK_CGROUP_DATA */
1481}
1482
Paolo Abenib19bc292021-01-20 15:39:14 +01001483static void mptcp_subflow_ops_override(struct sock *ssk)
1484{
1485#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1486 if (ssk->sk_prot == &tcpv6_prot)
1487 ssk->sk_prot = &tcpv6_prot_override;
1488 else
1489#endif
1490 ssk->sk_prot = &tcp_prot_override;
1491}
1492
1493static void mptcp_subflow_ops_undo_override(struct sock *ssk)
1494{
1495#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1496 if (ssk->sk_prot == &tcpv6_prot_override)
1497 ssk->sk_prot = &tcpv6_prot;
1498 else
1499#endif
1500 ssk->sk_prot = &tcp_prot;
1501}
Peter Krystad2303f992020-01-21 16:56:17 -08001502int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
1503{
1504 struct mptcp_subflow_context *subflow;
1505 struct net *net = sock_net(sk);
1506 struct socket *sf;
1507 int err;
1508
Paolo Abeniadf73412020-08-04 18:31:06 +02001509	/* un-accepted server sockets can reach here - on bad configuration,
1510 * bail early to avoid greater trouble later
1511 */
1512 if (unlikely(!sk->sk_socket))
1513 return -EINVAL;
1514
Peter Krystadcec37a62020-01-21 16:56:18 -08001515 err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
1516 &sf);
Peter Krystad2303f992020-01-21 16:56:17 -08001517 if (err)
1518 return err;
1519
1520 lock_sock(sf->sk);
1521
Nicolas Rybowski3764b0c2020-12-10 14:24:58 -08001522 /* the newly created socket has to be in the same cgroup as its parent */
1523 mptcp_attach_cgroup(sk, sf->sk);
1524
Peter Krystad2303f992020-01-21 16:56:17 -08001525	/* kernel sockets do not acquire a net ref by default, but the
 1526	 * TCP timer needs one.
1527 */
1528 sf->sk->sk_net_refcnt = 1;
1529 get_net(net);
David S. Millerf6f7d8c2020-01-29 10:39:23 +01001530#ifdef CONFIG_PROC_FS
Peter Krystad2303f992020-01-21 16:56:17 -08001531 this_cpu_add(*net->core.sock_inuse, 1);
David S. Millerf6f7d8c2020-01-29 10:39:23 +01001532#endif
Peter Krystad2303f992020-01-21 16:56:17 -08001533 err = tcp_set_ulp(sf->sk, "mptcp");
1534 release_sock(sf->sk);
1535
Wei Yongjunb8ad5402020-06-15 09:35:22 +08001536 if (err) {
1537 sock_release(sf);
Peter Krystad2303f992020-01-21 16:56:17 -08001538 return err;
Wei Yongjunb8ad5402020-06-15 09:35:22 +08001539 }
Peter Krystad2303f992020-01-21 16:56:17 -08001540
Paolo Abeni7d14b0d2020-05-07 18:53:24 +02001541 /* the newly created socket really belongs to the owning MPTCP master
1542 * socket, even if for additional subflows the allocation is performed
1543 * by a kernel workqueue. Adjust inode references, so that the
1544	 * procfs/diag interfaces really show this one belonging to the correct
1545 * user.
1546 */
1547 SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
1548 SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
1549 SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
1550
Peter Krystad2303f992020-01-21 16:56:17 -08001551 subflow = mptcp_subflow_ctx(sf->sk);
1552 pr_debug("subflow=%p", subflow);
1553
1554 *new_sock = sf;
Peter Krystad79c09492020-01-21 16:56:20 -08001555 sock_hold(sk);
Peter Krystad2303f992020-01-21 16:56:17 -08001556 subflow->conn = sk;
Paolo Abenib19bc292021-01-20 15:39:14 +01001557 mptcp_subflow_ops_override(sf->sk);
Peter Krystad2303f992020-01-21 16:56:17 -08001558
1559 return 0;
1560}
1561
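/* Usage sketch: callers create the subflow socket first and bind/connect
 * it afterwards, as __mptcp_subflow_connect() above does; on error the
 * socket was not created, so no cleanup is needed by this hypothetical
 * wrapper.
 */
static struct socket *example_create_subflow(struct sock *sk)
{
	struct socket *sf;

	if (mptcp_subflow_create_socket(sk, &sf))
		return NULL;
	return sf;
}
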
1562static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
1563 gfp_t priority)
1564{
1565 struct inet_connection_sock *icsk = inet_csk(sk);
1566 struct mptcp_subflow_context *ctx;
1567
1568 ctx = kzalloc(sizeof(*ctx), priority);
1569 if (!ctx)
1570 return NULL;
1571
1572 rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
Peter Krystadcec37a62020-01-21 16:56:18 -08001573 INIT_LIST_HEAD(&ctx->node);
Paolo Abenib19bc292021-01-20 15:39:14 +01001574 INIT_LIST_HEAD(&ctx->delegated_node);
Peter Krystad2303f992020-01-21 16:56:17 -08001575
1576 pr_debug("subflow=%p", ctx);
1577
1578 ctx->tcp_sock = sk;
1579
1580 return ctx;
1581}
1582
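/* Lookup-side sketch (assumption: the protocol.h accessor keeps its usual
 * shape in this tree): the context stored via rcu_assign_pointer() above
 * is reached straight from the ULP data pointer.
 */
static struct mptcp_subflow_context *example_ctx_lookup(const struct sock *sk)
{
	return (struct mptcp_subflow_context *)inet_csk(sk)->icsk_ulp_data;
}
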
Mat Martineau648ef4b2020-01-21 16:56:24 -08001583static void __subflow_state_change(struct sock *sk)
1584{
1585 struct socket_wq *wq;
1586
1587 rcu_read_lock();
1588 wq = rcu_dereference(sk->sk_wq);
1589 if (skwq_has_sleeper(wq))
1590 wake_up_interruptible_all(&wq->wait);
1591 rcu_read_unlock();
1592}
1593
1594static bool subflow_is_done(const struct sock *sk)
1595{
1596 return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
1597}
1598
1599static void subflow_state_change(struct sock *sk)
1600{
1601 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
Paolo Abenidc093db2020-03-13 16:52:42 +01001602 struct sock *parent = subflow->conn;
Mat Martineau648ef4b2020-01-21 16:56:24 -08001603
1604 __subflow_state_change(sk);
1605
Davide Caratti8fd73802020-06-29 22:26:21 +02001606 if (subflow_simultaneous_connect(sk)) {
Paolo Abeni5cf92bb2021-01-20 15:39:11 +01001607 mptcp_propagate_sndbuf(parent, sk);
Davide Caratti8fd73802020-06-29 22:26:21 +02001608 mptcp_do_fallback(sk);
Florian Westphala6b118f2020-06-30 21:24:45 +02001609 mptcp_rcv_space_init(mptcp_sk(parent), sk);
Davide Caratti8fd73802020-06-29 22:26:21 +02001610 pr_fallback(mptcp_sk(parent));
1611 subflow->conn_finished = 1;
Paolo Abeni490274b2021-06-21 17:33:08 -07001612 mptcp_set_connected(parent);
Davide Caratti8fd73802020-06-29 22:26:21 +02001613 }
1614
Mat Martineau648ef4b2020-01-21 16:56:24 -08001615 /* as recvmsg() does not acquire the subflow socket for ssk selection
1616	 * a fin packet carrying a DSS can go unnoticed if we don't trigger
1617 * the data available machinery here.
1618 */
Davide Carattie1ff9e82020-06-29 22:26:20 +02001619 if (mptcp_subflow_data_available(sk))
Florian Westphal2e522132020-02-26 10:14:51 +01001620 mptcp_data_ready(parent, sk);
Paolo Abeni499ada52021-06-10 15:59:44 -07001621 else if (unlikely(sk->sk_err))
1622 subflow_error_report(sk);
Mat Martineau648ef4b2020-01-21 16:56:24 -08001623
Florian Westphal40947e12021-02-12 15:59:56 -08001624 subflow_sched_work_if_closed(mptcp_sk(parent), sk);
1625
Mat Martineau067a0b32020-07-28 15:12:07 -07001626 if (__mptcp_check_fallback(mptcp_sk(parent)) &&
Mat Martineau648ef4b2020-01-21 16:56:24 -08001627 !subflow->rx_eof && subflow_is_done(sk)) {
1628 subflow->rx_eof = 1;
Florian Westphal59832e22020-04-02 13:44:52 +02001629 mptcp_subflow_eof(parent);
Mat Martineau648ef4b2020-01-21 16:56:24 -08001630 }
1631}
1632
Peter Krystad2303f992020-01-21 16:56:17 -08001633static int subflow_ulp_init(struct sock *sk)
1634{
Peter Krystadcec37a62020-01-21 16:56:18 -08001635 struct inet_connection_sock *icsk = inet_csk(sk);
Peter Krystad2303f992020-01-21 16:56:17 -08001636 struct mptcp_subflow_context *ctx;
1637 struct tcp_sock *tp = tcp_sk(sk);
1638 int err = 0;
1639
1640 /* disallow attaching ULP to a socket unless it has been
1641 * created with sock_create_kern()
1642 */
1643 if (!sk->sk_kern_sock) {
1644 err = -EOPNOTSUPP;
1645 goto out;
1646 }
1647
1648 ctx = subflow_create_ctx(sk, GFP_KERNEL);
1649 if (!ctx) {
1650 err = -ENOMEM;
1651 goto out;
1652 }
1653
1654 pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
1655
1656 tp->is_mptcp = 1;
Peter Krystadcec37a62020-01-21 16:56:18 -08001657 ctx->icsk_af_ops = icsk->icsk_af_ops;
1658 icsk->icsk_af_ops = subflow_default_af_ops(sk);
Mat Martineau648ef4b2020-01-21 16:56:24 -08001659 ctx->tcp_data_ready = sk->sk_data_ready;
1660 ctx->tcp_state_change = sk->sk_state_change;
1661 ctx->tcp_write_space = sk->sk_write_space;
Paolo Abeni15cc1042021-02-11 15:30:37 -08001662 ctx->tcp_error_report = sk->sk_error_report;
Mat Martineau648ef4b2020-01-21 16:56:24 -08001663 sk->sk_data_ready = subflow_data_ready;
1664 sk->sk_write_space = subflow_write_space;
1665 sk->sk_state_change = subflow_state_change;
Paolo Abeni15cc1042021-02-11 15:30:37 -08001666 sk->sk_error_report = subflow_error_report;
Peter Krystad2303f992020-01-21 16:56:17 -08001667out:
1668 return err;
1669}
1670
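/* Userspace-side illustration (not part of this file): because of the
 * sk_kern_sock check above, attaching the "mptcp" ULP from userspace fails
 * with EOPNOTSUPP; only sockets created with sock_create_kern() qualify,
 * as mptcp_subflow_create_socket() does. Roughly:
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <sys/socket.h>
 *
 *	int try_attach_mptcp_ulp(int fd)
 *	{
 *		// expected to fail with errno == EOPNOTSUPP
 *		return setsockopt(fd, IPPROTO_TCP, TCP_ULP,
 *				  "mptcp", sizeof("mptcp"));
 *	}
 */
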
Paolo Abenie16163b2020-11-16 10:48:09 +01001671static void subflow_ulp_release(struct sock *ssk)
Peter Krystad2303f992020-01-21 16:56:17 -08001672{
Paolo Abenie16163b2020-11-16 10:48:09 +01001673 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
1674 bool release = true;
1675 struct sock *sk;
Peter Krystad2303f992020-01-21 16:56:17 -08001676
1677 if (!ctx)
1678 return;
1679
Paolo Abenie16163b2020-11-16 10:48:09 +01001680 sk = ctx->conn;
1681 if (sk) {
1682 /* if the msk has been orphaned, keep the ctx
Paolo Abeni0597d0f2020-12-09 12:03:30 +01001683		 * alive; it will be freed by __mptcp_close_ssk()
 1684		 * when the subflow is still unaccepted
Paolo Abenie16163b2020-11-16 10:48:09 +01001685 */
Paolo Abeni0597d0f2020-12-09 12:03:30 +01001686 release = ctx->disposable || list_empty(&ctx->node);
Paolo Abenie16163b2020-11-16 10:48:09 +01001687 sock_put(sk);
1688 }
Peter Krystad79c09492020-01-21 16:56:20 -08001689
Paolo Abenib19bc292021-01-20 15:39:14 +01001690 mptcp_subflow_ops_undo_override(ssk);
Paolo Abenie16163b2020-11-16 10:48:09 +01001691 if (release)
1692 kfree_rcu(ctx, rcu);
Peter Krystad2303f992020-01-21 16:56:17 -08001693}
1694
Peter Krystadcec37a62020-01-21 16:56:18 -08001695static void subflow_ulp_clone(const struct request_sock *req,
1696 struct sock *newsk,
1697 const gfp_t priority)
1698{
1699 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
1700 struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
1701 struct mptcp_subflow_context *new_ctx;
1702
Peter Krystadf2962342020-03-27 14:48:39 -07001703 if (!tcp_rsk(req)->is_mptcp ||
1704 (!subflow_req->mp_capable && !subflow_req->mp_join)) {
Mat Martineau648ef4b2020-01-21 16:56:24 -08001705 subflow_ulp_fallback(newsk, old_ctx);
Peter Krystadcec37a62020-01-21 16:56:18 -08001706 return;
1707 }
1708
1709 new_ctx = subflow_create_ctx(newsk, priority);
Mat Martineauedc7e482020-01-24 16:04:03 -08001710 if (!new_ctx) {
Mat Martineau648ef4b2020-01-21 16:56:24 -08001711 subflow_ulp_fallback(newsk, old_ctx);
Peter Krystadcec37a62020-01-21 16:56:18 -08001712 return;
1713 }
1714
1715 new_ctx->conn_finished = 1;
1716 new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
Mat Martineau648ef4b2020-01-21 16:56:24 -08001717 new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
1718 new_ctx->tcp_state_change = old_ctx->tcp_state_change;
1719 new_ctx->tcp_write_space = old_ctx->tcp_write_space;
Paolo Abeni15cc1042021-02-11 15:30:37 -08001720 new_ctx->tcp_error_report = old_ctx->tcp_error_report;
Paolo Abeni58b09912020-03-13 16:52:41 +01001721 new_ctx->rel_write_seq = 1;
1722 new_ctx->tcp_sock = newsk;
1723
Peter Krystadf2962342020-03-27 14:48:39 -07001724 if (subflow_req->mp_capable) {
1725		/* see comments in subflow_syn_recv_sock(): the MPTCP connection
1726 * is fully established only after we receive the remote key
1727 */
1728 new_ctx->mp_capable = 1;
Peter Krystadf2962342020-03-27 14:48:39 -07001729 new_ctx->local_key = subflow_req->local_key;
1730 new_ctx->token = subflow_req->token;
1731 new_ctx->ssn_offset = subflow_req->ssn_offset;
1732 new_ctx->idsn = subflow_req->idsn;
1733 } else if (subflow_req->mp_join) {
Peter Krystadec3edaa2020-03-27 14:48:40 -07001734 new_ctx->ssn_offset = subflow_req->ssn_offset;
Peter Krystadf2962342020-03-27 14:48:39 -07001735 new_ctx->mp_join = 1;
1736 new_ctx->fully_established = 1;
1737 new_ctx->backup = subflow_req->backup;
1738 new_ctx->local_id = subflow_req->local_id;
Geliang Tang2ff0e562020-09-08 10:49:39 +08001739 new_ctx->remote_id = subflow_req->remote_id;
Peter Krystadf2962342020-03-27 14:48:39 -07001740 new_ctx->token = subflow_req->token;
1741 new_ctx->thmac = subflow_req->thmac;
1742 }
Peter Krystadcec37a62020-01-21 16:56:18 -08001743}
1744
Paolo Abenib19bc292021-01-20 15:39:14 +01001745static void tcp_release_cb_override(struct sock *ssk)
1746{
1747 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1748
1749 if (mptcp_subflow_has_delegated_action(subflow))
1750 mptcp_subflow_process_delegated(ssk);
1751
1752 tcp_release_cb(ssk);
1753}
1754
Peter Krystad2303f992020-01-21 16:56:17 -08001755static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
1756 .name = "mptcp",
1757 .owner = THIS_MODULE,
1758 .init = subflow_ulp_init,
1759 .release = subflow_ulp_release,
Peter Krystadcec37a62020-01-21 16:56:18 -08001760 .clone = subflow_ulp_clone,
Peter Krystad2303f992020-01-21 16:56:17 -08001761};
1762
Peter Krystadcec37a62020-01-21 16:56:18 -08001763static int subflow_ops_init(struct request_sock_ops *subflow_ops)
1764{
1765 subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
1766 subflow_ops->slab_name = "request_sock_subflow";
1767
1768 subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
1769 subflow_ops->obj_size, 0,
1770 SLAB_ACCOUNT |
1771 SLAB_TYPESAFE_BY_RCU,
1772 NULL);
1773 if (!subflow_ops->slab)
1774 return -ENOMEM;
1775
Peter Krystad79c09492020-01-21 16:56:20 -08001776 subflow_ops->destructor = subflow_req_destructor;
1777
Peter Krystadcec37a62020-01-21 16:56:18 -08001778 return 0;
1779}
1780
Paolo Abenid39dcec2020-06-26 19:29:59 +02001781void __init mptcp_subflow_init(void)
Peter Krystad2303f992020-01-21 16:56:17 -08001782{
Florian Westphal08b8d082020-07-30 21:25:53 +02001783 mptcp_subflow_request_sock_ops = tcp_request_sock_ops;
1784 if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0)
Peter Krystadcec37a62020-01-21 16:56:18 -08001785 panic("MPTCP: failed to init subflow request sock ops\n");
1786
1787 subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
Florian Westphal7ea851d2020-11-30 16:36:30 +01001788 subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
Peter Krystadcec37a62020-01-21 16:56:18 -08001789
1790 subflow_specific = ipv4_specific;
1791 subflow_specific.conn_request = subflow_v4_conn_request;
1792 subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
1793 subflow_specific.sk_rx_dst_set = subflow_finish_connect;
1794
Paolo Abenib19bc292021-01-20 15:39:14 +01001795 tcp_prot_override = tcp_prot;
1796 tcp_prot_override.release_cb = tcp_release_cb_override;
1797
Peter Krystadcec37a62020-01-21 16:56:18 -08001798#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1799 subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
Florian Westphal7ea851d2020-11-30 16:36:30 +01001800 subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
Peter Krystadcec37a62020-01-21 16:56:18 -08001801
1802 subflow_v6_specific = ipv6_specific;
1803 subflow_v6_specific.conn_request = subflow_v6_conn_request;
1804 subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
1805 subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
1806
1807 subflow_v6m_specific = subflow_v6_specific;
1808 subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
1809 subflow_v6m_specific.send_check = ipv4_specific.send_check;
1810 subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
1811 subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
1812 subflow_v6m_specific.net_frag_header_len = 0;
Paolo Abenib19bc292021-01-20 15:39:14 +01001813
1814 tcpv6_prot_override = tcpv6_prot;
1815 tcpv6_prot_override.release_cb = tcp_release_cb_override;
Peter Krystadcec37a62020-01-21 16:56:18 -08001816#endif
1817
Davide Caratti5147dfb2020-03-27 14:48:49 -07001818 mptcp_diag_subflow_init(&subflow_ulp_ops);
1819
Peter Krystad2303f992020-01-21 16:56:17 -08001820 if (tcp_register_ulp(&subflow_ulp_ops) != 0)
1821 panic("MPTCP: failed to register subflows to ULP\n");
1822}