blob: ef50a8628d77665722aca158f674f9293afbe236 [file] [log] [blame]
Peter Krystadeda7acd2020-01-21 16:56:16 -08001// SPDX-License-Identifier: GPL-2.0
2/* Multipath TCP
3 *
4 * Copyright (c) 2017 - 2019, Intel Corporation.
5 */
6
Geliang Tangc85adce2020-04-03 17:14:08 +08007#define pr_fmt(fmt) "MPTCP: " fmt
8
Peter Krystadeda7acd2020-01-21 16:56:16 -08009#include <linux/kernel.h>
Eric Biggersa24d22b2020-11-12 21:20:21 -080010#include <crypto/sha2.h>
Peter Krystadeda7acd2020-01-21 16:56:16 -080011#include <net/tcp.h>
12#include <net/mptcp.h>
13#include "protocol.h"
Geliang Tanga877de062020-09-24 08:29:51 +080014#include "mib.h"
Peter Krystadeda7acd2020-01-21 16:56:16 -080015
Paolo Abeni65492c52020-01-21 16:56:30 -080016static bool mptcp_cap_flag_sha256(u8 flags)
17{
18 return (flags & MPTCP_CAP_FLAG_MASK) == MPTCP_CAP_HMAC_SHA256;
19}
20
/* Parse a single MPTCP TCP-option payload and record its content in @mp_opt.
 *
 * @skb:    packet the option was received on (used to check SYN/ACK flags
 *          and payload presence for MP_CAPABLE size validation)
 * @ptr:    points at the first byte after the kind/length pair
 * @opsize: total option length as carried on the wire
 * @mp_opt: output; the relevant "seen" flag (mp_capable, mp_join, dss,
 *          add_addr, rm_addr, fastclose) is set only on successful parse
 *
 * Malformed or unsupported options are silently ignored (the corresponding
 * flag in @mp_opt stays clear), except MP_JOIN with a bad size which also
 * logs a warning.
 */
static void mptcp_parse_option(const struct sk_buff *skb,
			       const unsigned char *ptr, int opsize,
			       struct mptcp_options_received *mp_opt)
{
	u8 subtype = *ptr >> 4;	/* MPTCP subtype lives in the high nibble */
	int expected_opsize;
	u8 version;
	u8 flags;

	switch (subtype) {
	case MPTCPOPT_MP_CAPABLE:
		/* strict size checking: the expected length depends on which
		 * handshake packet this is (SYN / SYN+ACK / ACK, with or
		 * without data)
		 */
		if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
			if (skb->len > tcp_hdr(skb)->doff << 2)
				expected_opsize = TCPOLEN_MPTCP_MPC_ACK_DATA;
			else
				expected_opsize = TCPOLEN_MPTCP_MPC_ACK;
		} else {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)
				expected_opsize = TCPOLEN_MPTCP_MPC_SYNACK;
			else
				expected_opsize = TCPOLEN_MPTCP_MPC_SYN;
		}
		if (opsize != expected_opsize)
			break;

		/* try to be gentle vs future versions on the initial syn */
		version = *ptr++ & MPTCP_VERSION_MASK;
		if (opsize != TCPOLEN_MPTCP_MPC_SYN) {
			if (version != MPTCP_SUPPORTED_VERSION)
				break;
		} else if (version < MPTCP_SUPPORTED_VERSION) {
			break;
		}

		flags = *ptr++;
		if (!mptcp_cap_flag_sha256(flags) ||
		    (flags & MPTCP_CAP_EXTENSIBILITY))
			break;

		/* RFC 6824, Section 3.1:
		 * "For the Checksum Required bit (labeled "A"), if either
		 * host requires the use of checksums, checksums MUST be used.
		 * In other words, the only way for checksums not to be used
		 * is if both hosts in their SYNs set A=0."
		 *
		 * Section 3.3.0:
		 * "If a checksum is not present when its use has been
		 * negotiated, the receiver MUST close the subflow with a RST as
		 * it is considered broken."
		 *
		 * We don't implement DSS checksum - fall back to TCP.
		 */
		if (flags & MPTCP_CAP_CHECKSUM_REQD)
			break;

		mp_opt->mp_capable = 1;
		/* keys are present only from the SYN+ACK stage onward */
		if (opsize >= TCPOLEN_MPTCP_MPC_SYNACK) {
			mp_opt->sndr_key = get_unaligned_be64(ptr);
			ptr += 8;
		}
		if (opsize >= TCPOLEN_MPTCP_MPC_ACK) {
			mp_opt->rcvr_key = get_unaligned_be64(ptr);
			ptr += 8;
		}
		if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA) {
			/* Section 3.1.:
			 * "the data parameters in a MP_CAPABLE are semantically
			 * equivalent to those in a DSS option and can be used
			 * interchangeably."
			 */
			mp_opt->dss = 1;
			mp_opt->use_map = 1;
			mp_opt->mpc_map = 1;
			mp_opt->data_len = get_unaligned_be16(ptr);
			ptr += 2;
		}
		pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d",
			 version, flags, opsize, mp_opt->sndr_key,
			 mp_opt->rcvr_key, mp_opt->data_len);
		break;

	case MPTCPOPT_MP_JOIN:
		mp_opt->mp_join = 1;
		if (opsize == TCPOLEN_MPTCP_MPJ_SYN) {
			mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
			mp_opt->join_id = *ptr++;
			mp_opt->token = get_unaligned_be32(ptr);
			ptr += 4;
			mp_opt->nonce = get_unaligned_be32(ptr);
			ptr += 4;
			pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
				 mp_opt->backup, mp_opt->join_id,
				 mp_opt->token, mp_opt->nonce);
		} else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
			mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
			mp_opt->join_id = *ptr++;
			mp_opt->thmac = get_unaligned_be64(ptr);
			ptr += 8;
			mp_opt->nonce = get_unaligned_be32(ptr);
			ptr += 4;
			pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
				 mp_opt->backup, mp_opt->join_id,
				 mp_opt->thmac, mp_opt->nonce);
		} else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
			/* skip the 2 reserved bytes preceding the HMAC */
			ptr += 2;
			memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
			pr_debug("MP_JOIN hmac");
		} else {
			pr_warn("MP_JOIN bad option size");
			mp_opt->mp_join = 0;
		}
		break;

	case MPTCPOPT_DSS:
		pr_debug("DSS");
		ptr++;

		/* we must clear 'mpc_map' be able to detect MP_CAPABLE
		 * map vs DSS map in mptcp_incoming_options(), and reconstruct
		 * map info accordingly
		 */
		mp_opt->mpc_map = 0;
		flags = (*ptr++) & MPTCP_DSS_FLAG_MASK;
		mp_opt->data_fin = (flags & MPTCP_DSS_DATA_FIN) != 0;
		mp_opt->dsn64 = (flags & MPTCP_DSS_DSN64) != 0;
		mp_opt->use_map = (flags & MPTCP_DSS_HAS_MAP) != 0;
		mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
		mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);

		pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
			 mp_opt->data_fin, mp_opt->dsn64,
			 mp_opt->use_map, mp_opt->ack64,
			 mp_opt->use_ack);

		/* compute the expected length from the advertised flags:
		 * optional ack (32/64 bit) plus optional mapping (32/64 bit)
		 */
		expected_opsize = TCPOLEN_MPTCP_DSS_BASE;

		if (mp_opt->use_ack) {
			if (mp_opt->ack64)
				expected_opsize += TCPOLEN_MPTCP_DSS_ACK64;
			else
				expected_opsize += TCPOLEN_MPTCP_DSS_ACK32;
		}

		if (mp_opt->use_map) {
			if (mp_opt->dsn64)
				expected_opsize += TCPOLEN_MPTCP_DSS_MAP64;
			else
				expected_opsize += TCPOLEN_MPTCP_DSS_MAP32;
		}

		/* RFC 6824, Section 3.3:
		 * If a checksum is present, but its use had
		 * not been negotiated in the MP_CAPABLE handshake,
		 * the checksum field MUST be ignored.
		 */
		if (opsize != expected_opsize &&
		    opsize != expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM)
			break;

		mp_opt->dss = 1;

		if (mp_opt->use_ack) {
			if (mp_opt->ack64) {
				mp_opt->data_ack = get_unaligned_be64(ptr);
				ptr += 8;
			} else {
				mp_opt->data_ack = get_unaligned_be32(ptr);
				ptr += 4;
			}

			pr_debug("data_ack=%llu", mp_opt->data_ack);
		}

		if (mp_opt->use_map) {
			if (mp_opt->dsn64) {
				mp_opt->data_seq = get_unaligned_be64(ptr);
				ptr += 8;
			} else {
				mp_opt->data_seq = get_unaligned_be32(ptr);
				ptr += 4;
			}

			mp_opt->subflow_seq = get_unaligned_be32(ptr);
			ptr += 4;

			mp_opt->data_len = get_unaligned_be16(ptr);
			ptr += 2;

			pr_debug("data_seq=%llu subflow_seq=%u data_len=%u",
				 mp_opt->data_seq, mp_opt->subflow_seq,
				 mp_opt->data_len);
		}

		break;

	case MPTCPOPT_ADD_ADDR:
		/* the valid length set depends on the echo bit: an echo
		 * carries no trailing HMAC, so its lengths are 8 bytes shorter
		 */
		mp_opt->echo = (*ptr++) & MPTCP_ADDR_ECHO;
		if (!mp_opt->echo) {
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT)
				mp_opt->family = MPTCP_ADDR_IPVERSION_4;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
			else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6 ||
				 opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT)
				mp_opt->family = MPTCP_ADDR_IPVERSION_6;
#endif
			else
				break;
		} else {
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT)
				mp_opt->family = MPTCP_ADDR_IPVERSION_4;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
			else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE ||
				 opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT)
				mp_opt->family = MPTCP_ADDR_IPVERSION_6;
#endif
			else
				break;
		}

		mp_opt->add_addr = 1;
		mp_opt->addr_id = *ptr++;
		if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) {
			memcpy((u8 *)&mp_opt->addr.s_addr, (u8 *)ptr, 4);
			ptr += 4;
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT) {
				mp_opt->port = get_unaligned_be16(ptr);
				ptr += 2;
			}
		}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else {
			memcpy(mp_opt->addr6.s6_addr, (u8 *)ptr, 16);
			ptr += 16;
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT) {
				mp_opt->port = get_unaligned_be16(ptr);
				ptr += 2;
			}
		}
#endif
		if (!mp_opt->echo) {
			mp_opt->ahmac = get_unaligned_be64(ptr);
			ptr += 8;
		}
		pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d",
			 (mp_opt->family == MPTCP_ADDR_IPVERSION_6) ? "6" : "",
			 mp_opt->addr_id, mp_opt->ahmac, mp_opt->echo, mp_opt->port);
		break;

	case MPTCPOPT_RM_ADDR:
		if (opsize != TCPOLEN_MPTCP_RM_ADDR_BASE)
			break;

		ptr++;

		mp_opt->rm_addr = 1;
		mp_opt->rm_id = *ptr++;
		pr_debug("RM_ADDR: id=%d", mp_opt->rm_id);
		break;

	case MPTCPOPT_MP_FASTCLOSE:
		if (opsize != TCPOLEN_MPTCP_FASTCLOSE)
			break;

		/* skip the 2 reserved bytes preceding the key */
		ptr += 2;
		mp_opt->rcvr_key = get_unaligned_be64(ptr);
		ptr += 8;
		mp_opt->fastclose = 1;
		break;

	default:
		/* unknown subtypes are ignored */
		break;
	}
}
299
/* Walk all TCP options of @skb and collect the MPTCP ones into @mp_opt.
 *
 * All "option seen" flags in @mp_opt are cleared first, so the caller can
 * rely on them reflecting only this packet. Non-MPTCP options are skipped;
 * MPTCP options are handed to mptcp_parse_option() for decoding.
 */
void mptcp_get_options(const struct sk_buff *skb,
		       struct mptcp_options_received *mp_opt)
{
	const struct tcphdr *th = tcp_hdr(skb);
	const unsigned char *ptr;
	int length;

	/* initialize option status */
	mp_opt->mp_capable = 0;
	mp_opt->mp_join = 0;
	mp_opt->add_addr = 0;
	mp_opt->ahmac = 0;
	mp_opt->fastclose = 0;
	mp_opt->port = 0;
	mp_opt->rm_addr = 0;
	mp_opt->dss = 0;

	/* option area = header length beyond the fixed 20-byte TCP header */
	length = (th->doff * 4) - sizeof(struct tcphdr);
	ptr = (const unsigned char *)(th + 1);

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2) /* "silly options" */
				return;
			if (opsize > length)
				return;	/* don't parse partial options */
			if (opcode == TCPOPT_MPTCP)
				mptcp_parse_option(skb, ptr, opsize, mp_opt);
			/* ptr already advanced past kind+len, skip payload */
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}
343
Christoph Paaschcc7972e2020-01-21 16:56:31 -0800344bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
345 unsigned int *size, struct mptcp_out_options *opts)
Peter Krystadcec37a62020-01-21 16:56:18 -0800346{
347 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
348
Christoph Paaschcc7972e2020-01-21 16:56:31 -0800349 /* we will use snd_isn to detect first pkt [re]transmission
350 * in mptcp_established_options_mp()
351 */
352 subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
Peter Krystadcec37a62020-01-21 16:56:18 -0800353 if (subflow->request_mptcp) {
Peter Krystadcec37a62020-01-21 16:56:18 -0800354 opts->suboptions = OPTION_MPTCP_MPC_SYN;
Peter Krystadcec37a62020-01-21 16:56:18 -0800355 *size = TCPOLEN_MPTCP_MPC_SYN;
356 return true;
Peter Krystadec3edaa2020-03-27 14:48:40 -0700357 } else if (subflow->request_join) {
358 pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
359 subflow->local_nonce);
360 opts->suboptions = OPTION_MPTCP_MPJ_SYN;
361 opts->join_id = subflow->local_id;
362 opts->token = subflow->remote_token;
363 opts->nonce = subflow->local_nonce;
364 opts->backup = subflow->request_bkup;
365 *size = TCPOLEN_MPTCP_MPJ_SYN;
366 return true;
Peter Krystadcec37a62020-01-21 16:56:18 -0800367 }
368 return false;
369}
370
/* MP_JOIN client subflow must wait for 4th ack before sending any data:
 * TCP can't schedule delack timer before the subflow is fully established.
 * MPTCP uses the delack timer to do 3rd ack retransmissions
 */
static void schedule_3rdack_retransmission(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long timeout;

	/* reschedule with a timeout above RTT, as we must look only for drop */
	if (tp->srtt_us)
		timeout = tp->srtt_us << 1;
	else
		timeout = TCP_TIMEOUT_INIT;

	/* the delack timer must not be already armed for another purpose */
	WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
392
393static void clear_3rdack_retransmission(struct sock *sk)
394{
395 struct inet_connection_sock *icsk = inet_csk(sk);
396
397 sk_stop_timer(sk, &icsk->icsk_delack_timer);
398 icsk->icsk_ack.timeout = 0;
399 icsk->icsk_ack.ato = 0;
400 icsk->icsk_ack.pending &= ~(ICSK_ACK_SCHED | ICSK_ACK_TIMER);
401}
402
/* Emit the MP_CAPABLE or MP_JOIN option carried on the 3rd ack (and its
 * retransmissions) of the subflow handshake.
 *
 * Returns true and fills @opts / @*size when such an option is needed on
 * @skb, false otherwise (including when @skb is NULL, see below).
 */
static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
					 unsigned int *size,
					 unsigned int remaining,
					 struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_ext *mpext;
	unsigned int data_len;

	/* When skb is not available, we better over-estimate the emitted
	 * options len. A full DSS option (28 bytes) is longer than
	 * TCPOLEN_MPTCP_MPC_ACK_DATA(22) or TCPOLEN_MPTCP_MPJ_ACK(24), so
	 * tell the caller to defer the estimate to
	 * mptcp_established_options_dss(), which will reserve enough space.
	 */
	if (!skb)
		return false;

	/* MPC/MPJ needed only on 3rd ack packet */
	if (subflow->fully_established ||
	    subflow->snd_isn != TCP_SKB_CB(skb)->seq)
		return false;

	if (subflow->mp_capable) {
		mpext = mptcp_get_ext(skb);
		data_len = mpext ? mpext->data_len : 0;

		/* we will check ext_copy.data_len in mptcp_write_options() to
		 * discriminate between TCPOLEN_MPTCP_MPC_ACK_DATA and
		 * TCPOLEN_MPTCP_MPC_ACK
		 */
		opts->ext_copy.data_len = data_len;
		opts->suboptions = OPTION_MPTCP_MPC_ACK;
		opts->sndr_key = subflow->local_key;
		opts->rcvr_key = subflow->remote_key;

		/* Section 3.1.
		 * The MP_CAPABLE option is carried on the SYN, SYN/ACK, and ACK
		 * packets that start the first subflow of an MPTCP connection,
		 * as well as the first packet that carries data
		 */
		if (data_len > 0)
			*size = ALIGN(TCPOLEN_MPTCP_MPC_ACK_DATA, 4);
		else
			*size = TCPOLEN_MPTCP_MPC_ACK;

		pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
			 subflow, subflow->local_key, subflow->remote_key,
			 data_len);

		return true;
	} else if (subflow->mp_join) {
		opts->suboptions = OPTION_MPTCP_MPJ_ACK;
		memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
		*size = TCPOLEN_MPTCP_MPJ_ACK;
		pr_debug("subflow=%p", subflow);

		/* arm the 3rd-ack retransmission timer, cleared once the
		 * subflow becomes fully established
		 */
		schedule_3rdack_retransmission(sk);
		return true;
	}
	return false;
}
465
/* Fold the DATA_FIN signal into the DSS mapping @ext for @skb, either by
 * synthesizing a dedicated zero-length mapping or by extending the final
 * data mapping by the one byte the DATA_FIN consumes.
 */
static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
				 struct sk_buff *skb, struct mptcp_ext *ext)
{
	/* The write_seq value has already been incremented, so the actual
	 * sequence number for the DATA_FIN is one less.
	 */
	u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq) - 1;

	if (!ext->use_map || !skb->len) {
		/* RFC6824 requires a DSS mapping with specific values
		 * if DATA_FIN is set but no data payload is mapped
		 */
		ext->data_fin = 1;
		ext->use_map = 1;
		ext->dsn64 = 1;
		ext->data_seq = data_fin_tx_seq;
		ext->subflow_seq = 0;
		ext->data_len = 1;
	} else if (ext->data_seq + ext->data_len == data_fin_tx_seq) {
		/* If there's an existing DSS mapping and it is the
		 * final mapping, DATA_FIN consumes 1 additional byte of
		 * mapping space.
		 */
		ext->data_fin = 1;
		ext->data_len++;
	}
}
493
/* Build the DSS option (mapping and/or data ack, plus DATA_FIN) for @skb.
 *
 * When @skb is NULL the function reserves worst-case space for the caller.
 * Returns true when a DSS option should be emitted; @*size receives the
 * 4-byte aligned option length.
 */
static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
					  unsigned int *size,
					  unsigned int remaining,
					  struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	unsigned int dss_size = 0;
	u64 snd_data_fin_enable;
	struct mptcp_ext *mpext;
	unsigned int ack_size;
	bool ret = false;

	mpext = skb ? mptcp_get_ext(skb) : NULL;
	snd_data_fin_enable = mptcp_data_fin_enabled(msk);

	/* reserve mapping space when there is a map to send, when DATA_FIN
	 * is pending, or when over-estimating for a not-yet-built skb
	 */
	if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) {
		unsigned int map_size;

		map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64;

		remaining -= map_size;
		dss_size = map_size;
		if (mpext)
			opts->ext_copy = *mpext;

		if (skb && snd_data_fin_enable)
			mptcp_write_data_fin(subflow, skb, &opts->ext_copy);
		ret = true;
	}

	/* passive sockets msk will set the 'can_ack' after accept(), even
	 * if the first subflow may have the already the remote key handy
	 */
	opts->ext_copy.use_ack = 0;
	if (!READ_ONCE(msk->can_ack)) {
		*size = ALIGN(dss_size, 4);
		return ret;
	}

	if (READ_ONCE(msk->use_64bit_ack)) {
		ack_size = TCPOLEN_MPTCP_DSS_ACK64;
		opts->ext_copy.data_ack = READ_ONCE(msk->ack_seq);
		opts->ext_copy.ack64 = 1;
	} else {
		ack_size = TCPOLEN_MPTCP_DSS_ACK32;
		opts->ext_copy.data_ack32 = (uint32_t)READ_ONCE(msk->ack_seq);
		opts->ext_copy.ack64 = 0;
	}
	opts->ext_copy.use_ack = 1;
	WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk));

	/* Add kind/length/subtype/flag overhead if mapping is not populated */
	if (dss_size == 0)
		ack_size += TCPOLEN_MPTCP_DSS_BASE;

	dss_size += ack_size;

	*size = ALIGN(dss_size, 4);
	return true;
}
555
Peter Krystad3df523a2020-03-27 14:48:37 -0700556static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id,
557 struct in_addr *addr)
558{
Todd Malsbarybd697222020-05-21 19:10:49 -0700559 u8 hmac[SHA256_DIGEST_SIZE];
Peter Krystad3df523a2020-03-27 14:48:37 -0700560 u8 msg[7];
561
562 msg[0] = addr_id;
563 memcpy(&msg[1], &addr->s_addr, 4);
564 msg[5] = 0;
565 msg[6] = 0;
566
567 mptcp_crypto_hmac_sha(key1, key2, msg, 7, hmac);
568
Todd Malsbarybd697222020-05-21 19:10:49 -0700569 return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]);
Peter Krystad3df523a2020-03-27 14:48:37 -0700570}
571
572#if IS_ENABLED(CONFIG_MPTCP_IPV6)
573static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id,
574 struct in6_addr *addr)
575{
Todd Malsbarybd697222020-05-21 19:10:49 -0700576 u8 hmac[SHA256_DIGEST_SIZE];
Peter Krystad3df523a2020-03-27 14:48:37 -0700577 u8 msg[19];
578
579 msg[0] = addr_id;
580 memcpy(&msg[1], &addr->s6_addr, 16);
581 msg[17] = 0;
582 msg[18] = 0;
583
584 mptcp_crypto_hmac_sha(key1, key2, msg, 19, hmac);
585
Todd Malsbarybd697222020-05-21 19:10:49 -0700586 return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]);
Peter Krystad3df523a2020-03-27 14:48:37 -0700587}
588#endif
589
/* Emit a pending ADD_ADDR announcement (or its echo) if the path manager
 * has one queued and it fits in @remaining option bytes.
 *
 * On a pure ack carrying an IPv6 or port-bearing announce, all previously
 * prepared suboptions are dropped to make room. Returns true when an
 * ADD_ADDR option was placed in @opts; @*size is the net size delta.
 */
static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *skb,
					       unsigned int *size,
					       unsigned int remaining,
					       struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	bool drop_other_suboptions = false;
	unsigned int opt_size = *size;
	struct mptcp_addr_info saddr;
	bool echo;
	bool port;
	int len;

	if ((mptcp_pm_should_add_signal_ipv6(msk) ||
	     mptcp_pm_should_add_signal_port(msk)) &&
	    skb && skb_is_tcp_pure_ack(skb)) {
		pr_debug("drop other suboptions");
		opts->suboptions = 0;
		opts->ext_copy.use_ack = 0;
		opts->ext_copy.use_map = 0;
		/* reclaim the space the dropped suboptions had reserved */
		remaining += opt_size;
		drop_other_suboptions = true;
	}

	if (!mptcp_pm_should_add_signal(msk) ||
	    !(mptcp_pm_add_addr_signal(msk, remaining, &saddr, &echo, &port)))
		return false;

	len = mptcp_add_addr_len(saddr.family, echo, port);
	if (remaining < len)
		return false;

	*size = len;
	if (drop_other_suboptions)
		*size -= opt_size;
	opts->addr_id = saddr.id;
	if (port)
		opts->port = ntohs(saddr.port);
	if (saddr.family == AF_INET) {
		opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
		opts->addr = saddr.addr;
		/* only announcements (not echoes) carry the HMAC */
		if (!echo) {
			opts->ahmac = add_addr_generate_hmac(msk->local_key,
							     msk->remote_key,
							     opts->addr_id,
							     &opts->addr);
		}
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (saddr.family == AF_INET6) {
		opts->suboptions |= OPTION_MPTCP_ADD_ADDR6;
		opts->addr6 = saddr.addr6;
		if (!echo) {
			opts->ahmac = add_addr6_generate_hmac(msk->local_key,
							      msk->remote_key,
							      opts->addr_id,
							      &opts->addr6);
		}
	}
#endif
	pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d",
		 opts->addr_id, opts->ahmac, echo, opts->port);

	return true;
}
656
Geliang Tang5cb104a2020-09-24 08:29:48 +0800657static bool mptcp_established_options_rm_addr(struct sock *sk,
658 unsigned int *size,
659 unsigned int remaining,
660 struct mptcp_out_options *opts)
661{
662 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
663 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
664 u8 rm_id;
665
666 if (!mptcp_pm_should_rm_signal(msk) ||
667 !(mptcp_pm_rm_addr_signal(msk, remaining, &rm_id)))
668 return false;
669
670 if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
671 return false;
672
673 *size = TCPOLEN_MPTCP_RM_ADDR_BASE;
674 opts->suboptions |= OPTION_MPTCP_RM_ADDR;
675 opts->rm_id = rm_id;
676
677 pr_debug("rm_id=%d", opts->rm_id);
678
679 return true;
680}
681
Geliang Tang06706542021-01-08 16:47:57 -0800682static bool mptcp_established_options_mp_prio(struct sock *sk,
683 unsigned int *size,
684 unsigned int remaining,
685 struct mptcp_out_options *opts)
686{
687 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
688
689 if (!subflow->send_mp_prio)
690 return false;
691
692 if (remaining < TCPOLEN_MPTCP_PRIO)
693 return false;
694
695 *size = TCPOLEN_MPTCP_PRIO;
696 opts->suboptions |= OPTION_MPTCP_PRIO;
697 opts->backup = subflow->request_bkup;
698
699 pr_debug("prio=%d", opts->backup);
700
701 return true;
702}
703
/* Collect all MPTCP options to emit on an established-state packet.
 *
 * Tries, in order: MPC/MPJ 3rd-ack options (mutually exclusive with DSS),
 * then ADD_ADDR or RM_ADDR (mutually exclusive with each other), then
 * MP_PRIO. @*size accumulates the total option length; @remaining is the
 * TCP option space still available. Returns true when any option is set.
 */
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
			       unsigned int *size, unsigned int remaining,
			       struct mptcp_out_options *opts)
{
	unsigned int opt_size = 0;
	bool ret = false;

	opts->suboptions = 0;

	/* no MPTCP options once the connection fell back to plain TCP */
	if (unlikely(mptcp_check_fallback(sk)))
		return false;

	/* prevent adding of any MPTCP related options on reset packet
	 * until we support MP_TCPRST/MP_FASTCLOSE
	 */
	if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST))
		return false;

	if (mptcp_established_options_mp(sk, skb, &opt_size, remaining, opts))
		ret = true;
	else if (mptcp_established_options_dss(sk, skb, &opt_size, remaining,
					       opts))
		ret = true;

	/* we reserved enough space for the above options, and exceeding the
	 * TCP option space would be fatal
	 */
	if (WARN_ON_ONCE(opt_size > remaining))
		return false;

	*size += opt_size;
	remaining -= opt_size;
	if (mptcp_established_options_add_addr(sk, skb, &opt_size, remaining, opts)) {
		*size += opt_size;
		remaining -= opt_size;
		ret = true;
	} else if (mptcp_established_options_rm_addr(sk, &opt_size, remaining, opts)) {
		*size += opt_size;
		remaining -= opt_size;
		ret = true;
	}

	if (mptcp_established_options_mp_prio(sk, &opt_size, remaining, opts)) {
		*size += opt_size;
		remaining -= opt_size;
		ret = true;
	}

	return ret;
}
754
Peter Krystadcec37a62020-01-21 16:56:18 -0800755bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
756 struct mptcp_out_options *opts)
757{
758 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
759
760 if (subflow_req->mp_capable) {
761 opts->suboptions = OPTION_MPTCP_MPC_SYNACK;
762 opts->sndr_key = subflow_req->local_key;
763 *size = TCPOLEN_MPTCP_MPC_SYNACK;
764 pr_debug("subflow_req=%p, local_key=%llu",
765 subflow_req, subflow_req->local_key);
766 return true;
Peter Krystadf2962342020-03-27 14:48:39 -0700767 } else if (subflow_req->mp_join) {
768 opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
769 opts->backup = subflow_req->backup;
770 opts->join_id = subflow_req->local_id;
771 opts->thmac = subflow_req->thmac;
772 opts->nonce = subflow_req->local_nonce;
773 pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
774 subflow_req, opts->backup, opts->join_id,
775 opts->thmac, opts->nonce);
776 *size = TCPOLEN_MPTCP_MPJ_SYNACK;
777 return true;
Peter Krystadcec37a62020-01-21 16:56:18 -0800778 }
779 return false;
780}
781
/* Gate incoming option processing on the subflow handshake state.
 *
 * Returns true when the caller may keep processing the MPTCP options of
 * @skb, false when the packet must be ignored (TCP fallback engaged or
 * the subflow has been reset). As a side effect this may mark the
 * subflow (and the msk) fully established and notify the path manager.
 */
static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
				    struct mptcp_subflow_context *subflow,
				    struct sk_buff *skb,
				    struct mptcp_options_received *mp_opt)
{
	/* here we can process OoO, in-window pkts, only in-sequence 4th ack
	 * will make the subflow fully established
	 */
	if (likely(subflow->fully_established)) {
		/* on passive sockets, check for 3rd ack retransmission
		 * note that msk is always set by subflow_syn_recv_sock()
		 * for mp_join subflows
		 */
		if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 &&
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
		    subflow->mp_join && mp_opt->mp_join &&
		    READ_ONCE(msk->pm.server_side))
			tcp_send_ack(ssk);
		goto fully_established;
	}

	/* we must process OoO packets before the first subflow is fully
	 * established. OoO packets are instead a protocol violation
	 * for MP_JOIN subflows as the peer must not send any data
	 * before receiving the forth ack - cfr. RFC 8684 section 3.2.
	 */
	if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) {
		if (subflow->mp_join)
			goto reset;
		return subflow->mp_capable;
	}

	if (mp_opt->dss && mp_opt->use_ack) {
		/* subflows are fully established as soon as we get any
		 * additional ack.
		 */
		subflow->fully_established = 1;
		WRITE_ONCE(msk->fully_established, true);
		goto fully_established;
	}

	/* an in-sequence ADD_ADDR is enough to consider the peer alive and
	 * the connection established; keep processing the option
	 */
	if (mp_opt->add_addr) {
		WRITE_ONCE(msk->fully_established, true);
		return true;
	}

	/* If the first established packet does not contain MP_CAPABLE + data
	 * then fallback to TCP. Fallback scenarios requires a reset for
	 * MP_JOIN subflows.
	 */
	if (!mp_opt->mp_capable) {
		if (subflow->mp_join)
			goto reset;
		subflow->mp_capable = 0;
		pr_fallback(msk);
		__mptcp_do_fallback(msk);
		return false;
	}

	/* a client should never send MP_CAPABLE after the handshake;
	 * warn once but keep going and complete the establishment
	 */
	if (unlikely(!READ_ONCE(msk->pm.server_side)))
		pr_warn_once("bogus mpc option on established client sk");
	mptcp_subflow_fully_established(subflow, mp_opt);

fully_established:
	/* if the subflow is not already linked into the conn_list, we can't
	 * notify the PM: this subflow is still on the listener queue
	 * and the PM possibly acquiring the subflow lock could race with
	 * the listener close
	 */
	if (likely(subflow->pm_notified) || list_empty(&subflow->node))
		return true;

	/* notify the PM exactly once per subflow */
	subflow->pm_notified = 1;
	if (subflow->mp_join) {
		clear_3rdack_retransmission(ssk);
		mptcp_pm_subflow_established(msk, subflow);
	} else {
		mptcp_pm_fully_established(msk);
	}
	return true;

reset:
	/* protocol violation on an MP_JOIN subflow: tear it down */
	mptcp_subflow_reset(ssk);
	return false;
}
867
Paolo Abenicc9d2562020-03-27 14:48:42 -0700868static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
869{
870 u32 old_ack32, cur_ack32;
871
872 if (use_64bit)
873 return cur_ack;
874
875 old_ack32 = (u32)old_ack;
876 cur_ack32 = (u32)cur_ack;
877 cur_ack = (old_ack & GENMASK_ULL(63, 32)) + cur_ack32;
878 if (unlikely(before(cur_ack32, old_ack32)))
879 return cur_ack + (1LL << 32);
880 return cur_ack;
881}
882
/* Apply the DATA_ACK carried by the current packet to the MPTCP-level
 * send state: advance msk->snd_una, extend the send window right edge
 * and, when the window allows, try to push more data out via @ssk.
 * All the state updates happen under the msk data lock.
 */
static void ack_update_msk(struct mptcp_sock *msk,
			   struct sock *ssk,
			   struct mptcp_options_received *mp_opt)
{
	u64 new_wnd_end, new_snd_una, snd_nxt = READ_ONCE(msk->snd_nxt);
	struct sock *sk = (struct sock *)msk;
	u64 old_snd_una;

	mptcp_data_lock(sk);

	/* avoid ack expansion on update conflict, to reduce the risk of
	 * wrongly expanding to a future ack sequence number, which is way
	 * more dangerous than missing an ack
	 */
	old_snd_una = msk->snd_una;
	new_snd_una = expand_ack(old_snd_una, mp_opt->data_ack, mp_opt->ack64);

	/* ACK for data not even sent yet? Ignore. */
	if (after64(new_snd_una, snd_nxt))
		new_snd_una = old_snd_una;

	/* right edge of the MPTCP-level send window, derived from the
	 * subflow-level window announced by the peer
	 */
	new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd;

	/* the window edge only moves forward */
	if (after64(new_wnd_end, msk->wnd_end))
		msk->wnd_end = new_wnd_end;

	/* this assumes mptcp_incoming_options() is invoked after tcp_ack() */
	if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt)) &&
	    sk_stream_memory_free(ssk))
		__mptcp_check_push(sk, ssk);

	/* only a forward-moving ack frees acked data */
	if (after64(new_snd_una, old_snd_una)) {
		msk->snd_una = new_snd_una;
		__mptcp_data_acked(sk);
	}
	mptcp_data_unlock(sk);
}
920
Mat Martineau1a49b2c2020-09-29 15:08:20 -0700921bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit)
Mat Martineau3721b9b2020-07-28 15:12:03 -0700922{
923 /* Skip if DATA_FIN was already received.
924 * If updating simultaneously with the recvmsg loop, values
925 * should match. If they mismatch, the peer is misbehaving and
926 * we will prefer the most recent information.
927 */
928 if (READ_ONCE(msk->rcv_data_fin) || !READ_ONCE(msk->first))
929 return false;
930
Mat Martineau1a49b2c2020-09-29 15:08:20 -0700931 WRITE_ONCE(msk->rcv_data_fin_seq,
932 expand_ack(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit));
Mat Martineau3721b9b2020-07-28 15:12:03 -0700933 WRITE_ONCE(msk->rcv_data_fin, 1);
934
935 return true;
936}
937
Peter Krystad1b1c7a02020-03-27 14:48:38 -0700938static bool add_addr_hmac_valid(struct mptcp_sock *msk,
939 struct mptcp_options_received *mp_opt)
940{
941 u64 hmac = 0;
942
943 if (mp_opt->echo)
944 return true;
945
946 if (mp_opt->family == MPTCP_ADDR_IPVERSION_4)
947 hmac = add_addr_generate_hmac(msk->remote_key,
948 msk->local_key,
949 mp_opt->addr_id, &mp_opt->addr);
950#if IS_ENABLED(CONFIG_MPTCP_IPV6)
951 else
952 hmac = add_addr6_generate_hmac(msk->remote_key,
953 msk->local_key,
954 mp_opt->addr_id, &mp_opt->addr6);
955#endif
956
957 pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
958 msk, (unsigned long long)hmac,
959 (unsigned long long)mp_opt->ahmac);
960
961 return hmac == mp_opt->ahmac;
962}
963
/* Process the MPTCP options of an incoming packet on subflow @sk:
 * handle fallback connections, FASTCLOSE, ADD_ADDR/RM_ADDR path-manager
 * events, DATA_ACK and DATA_FIN, and finally attach the DSS mapping to
 * @skb as an skb extension for the MPTCP receive path.
 */
void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_options_received mp_opt;
	struct mptcp_ext *mpext;

	if (__mptcp_check_fallback(msk)) {
		/* Keep it simple and unconditionally trigger send data cleanup and
		 * pending queue spooling. We will need to acquire the data lock
		 * for more accurate checks, and once the lock is acquired, such
		 * helpers are cheap.
		 */
		mptcp_data_lock(subflow->conn);
		if (sk_stream_memory_free(sk))
			__mptcp_check_push(subflow->conn, sk);
		__mptcp_data_acked(subflow->conn);
		mptcp_data_unlock(subflow->conn);
		return;
	}

	mptcp_get_options(skb, &mp_opt);
	/* drop the packet here if the subflow is not (yet) usable */
	if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
		return;

	/* FASTCLOSE is only honored when it echoes our own key */
	if (mp_opt.fastclose &&
	    msk->local_key == mp_opt.rcvr_key) {
		WRITE_ONCE(msk->rcv_fastclose, true);
		mptcp_schedule_work((struct sock *)msk);
	}

	/* hand a (HMAC-verified) announced address to the path manager */
	if (mp_opt.add_addr && add_addr_hmac_valid(msk, &mp_opt)) {
		struct mptcp_addr_info addr;

		addr.port = htons(mp_opt.port);
		addr.id = mp_opt.addr_id;
		if (mp_opt.family == MPTCP_ADDR_IPVERSION_4) {
			addr.family = AF_INET;
			addr.addr = mp_opt.addr;
		}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else if (mp_opt.family == MPTCP_ADDR_IPVERSION_6) {
			addr.family = AF_INET6;
			addr.addr6 = mp_opt.addr6;
		}
#endif
		if (!mp_opt.echo) {
			mptcp_pm_add_addr_received(msk, &addr);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR);
		} else {
			/* echo of our own announce: stop retransmitting it */
			mptcp_pm_del_add_timer(msk, &addr);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD);
		}
		/* mark as consumed */
		mp_opt.add_addr = 0;
	}

	if (mp_opt.rm_addr) {
		mptcp_pm_rm_addr_received(msk, mp_opt.rm_id);
		mp_opt.rm_addr = 0;
	}

	/* everything below only deals with the DSS option */
	if (!mp_opt.dss)
		return;

	/* we can't wait for recvmsg() to update the ack_seq, otherwise
	 * monodirectional flows will stuck
	 */
	if (mp_opt.use_ack)
		ack_update_msk(msk, sk, &mp_opt);

	/* Zero-data-length packets are dropped by the caller and not
	 * propagated to the MPTCP layer, so the skb extension does not
	 * need to be allocated or populated. DATA_FIN information, if
	 * present, needs to be updated here before the skb is freed.
	 */
	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
		/* a data_len of 1 means DATA_FIN-only mapping; the extra
		 * sock_hold() pairs with the worker's release
		 */
		if (mp_opt.data_fin && mp_opt.data_len == 1 &&
		    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) &&
		    schedule_work(&msk->work))
			sock_hold(subflow->conn);

		return;
	}

	mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
	if (!mpext)
		return;

	memset(mpext, 0, sizeof(*mpext));

	if (mp_opt.use_map) {
		if (mp_opt.mpc_map) {
			/* this is an MP_CAPABLE carrying MPTCP data
			 * we know this map the first chunk of data
			 */
			mptcp_crypto_key_sha(subflow->remote_key, NULL,
					     &mpext->data_seq);
			mpext->data_seq++;
			mpext->subflow_seq = 1;
			mpext->dsn64 = 1;
			mpext->mpc_map = 1;
			mpext->data_fin = 0;
		} else {
			/* copy the explicit mapping from the DSS option */
			mpext->data_seq = mp_opt.data_seq;
			mpext->subflow_seq = mp_opt.subflow_seq;
			mpext->dsn64 = mp_opt.dsn64;
			mpext->data_fin = mp_opt.data_fin;
		}
		mpext->data_len = mp_opt.data_len;
		mpext->use_map = 1;
	}
}
1076
Florian Westphalfa3fe2b2020-11-19 11:46:02 -08001077static void mptcp_set_rwin(const struct tcp_sock *tp)
1078{
1079 const struct sock *ssk = (const struct sock *)tp;
1080 const struct mptcp_subflow_context *subflow;
1081 struct mptcp_sock *msk;
1082 u64 ack_seq;
1083
1084 subflow = mptcp_subflow_ctx(ssk);
1085 msk = mptcp_sk(subflow->conn);
1086
1087 ack_seq = READ_ONCE(msk->ack_seq) + tp->rcv_wnd;
1088
1089 if (after64(ack_seq, READ_ONCE(msk->rcv_wnd_sent)))
1090 WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
1091}
1092
/* Serialize the MPTCP options described by @opts into the TCP option
 * space at @ptr. The caller has already sized the option space; here we
 * only write the bytes (32 bits at a time, hence the ptr += N/4 steps
 * and the TCPOPT_NOP padding). When @tp is non-NULL, also refresh the
 * MPTCP-level advertised receive window bookkeeping.
 */
void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
			 struct mptcp_out_options *opts)
{
	if ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK |
	     OPTION_MPTCP_MPC_ACK) & opts->suboptions) {
		u8 len;

		/* MP_CAPABLE length depends on handshake stage and on
		 * whether the third ack carries data (data_len suffix)
		 */
		if (OPTION_MPTCP_MPC_SYN & opts->suboptions)
			len = TCPOLEN_MPTCP_MPC_SYN;
		else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions)
			len = TCPOLEN_MPTCP_MPC_SYNACK;
		else if (opts->ext_copy.data_len)
			len = TCPOLEN_MPTCP_MPC_ACK_DATA;
		else
			len = TCPOLEN_MPTCP_MPC_ACK;

		*ptr++ = mptcp_option(MPTCPOPT_MP_CAPABLE, len,
				      MPTCP_SUPPORTED_VERSION,
				      MPTCP_CAP_HMAC_SHA256);

		/* the initial SYN carries no keys */
		if (!((OPTION_MPTCP_MPC_SYNACK | OPTION_MPTCP_MPC_ACK) &
		    opts->suboptions))
			goto mp_capable_done;

		put_unaligned_be64(opts->sndr_key, ptr);
		ptr += 2;
		/* SYN-ACK carries the sender key only */
		if (!((OPTION_MPTCP_MPC_ACK) & opts->suboptions))
			goto mp_capable_done;

		put_unaligned_be64(opts->rcvr_key, ptr);
		ptr += 2;
		if (!opts->ext_copy.data_len)
			goto mp_capable_done;

		/* 16-bit data_len followed by two NOPs of padding */
		put_unaligned_be32(opts->ext_copy.data_len << 16 |
				   TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
		ptr += 1;
	}

mp_capable_done:
	if ((OPTION_MPTCP_ADD_ADDR
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	     | OPTION_MPTCP_ADD_ADDR6
#endif
	    ) & opts->suboptions) {
		u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE;
		u8 echo = MPTCP_ADDR_ECHO;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		if (OPTION_MPTCP_ADD_ADDR6 & opts->suboptions)
			len = TCPOLEN_MPTCP_ADD_ADDR6_BASE;
#endif

		if (opts->port)
			len += TCPOLEN_MPTCP_PORT_LEN;

		/* an ADD_ADDR we originate carries an HMAC and clears the
		 * echo flag; a pure echo carries neither
		 */
		if (opts->ahmac) {
			len += sizeof(opts->ahmac);
			echo = 0;
		}

		*ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
				      len, echo, opts->addr_id);
		if (OPTION_MPTCP_ADD_ADDR & opts->suboptions) {
			/* IPv4 address: 4 bytes = one 32-bit word */
			memcpy((u8 *)ptr, (u8 *)&opts->addr.s_addr, 4);
			ptr += 1;
		}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else if (OPTION_MPTCP_ADD_ADDR6 & opts->suboptions) {
			/* IPv6 address: 16 bytes = four 32-bit words */
			memcpy((u8 *)ptr, opts->addr6.s6_addr, 16);
			ptr += 4;
		}
#endif

		if (!opts->port) {
			if (opts->ahmac) {
				put_unaligned_be64(opts->ahmac, ptr);
				ptr += 2;
			}
		} else {
			if (opts->ahmac) {
				/* port (2) + hmac (8) is not 32-bit
				 * aligned: write byte-wise and pad with
				 * two NOPs to reach 12 bytes
				 */
				u8 *bptr = (u8 *)ptr;

				put_unaligned_be16(opts->port, bptr);
				bptr += 2;
				put_unaligned_be64(opts->ahmac, bptr);
				bptr += 8;
				put_unaligned_be16(TCPOPT_NOP << 8 |
						   TCPOPT_NOP, bptr);

				ptr += 3;
			} else {
				/* port + two NOPs of padding */
				put_unaligned_be32(opts->port << 16 |
						   TCPOPT_NOP << 8 |
						   TCPOPT_NOP, ptr);
				ptr += 1;
			}
		}
	}

	if (OPTION_MPTCP_RM_ADDR & opts->suboptions) {
		*ptr++ = mptcp_option(MPTCPOPT_RM_ADDR,
				      TCPOLEN_MPTCP_RM_ADDR_BASE,
				      0, opts->rm_id);
	}

	if (OPTION_MPTCP_PRIO & opts->suboptions) {
		const struct sock *ssk = (const struct sock *)tp;
		struct mptcp_subflow_context *subflow;

		/* MP_PRIO has been emitted: clear the pending flag */
		subflow = mptcp_subflow_ctx(ssk);
		subflow->send_mp_prio = 0;

		*ptr++ = mptcp_option(MPTCPOPT_MP_PRIO,
				      TCPOLEN_MPTCP_PRIO,
				      opts->backup, TCPOPT_NOP);
	}

	if (OPTION_MPTCP_MPJ_SYN & opts->suboptions) {
		*ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
				      TCPOLEN_MPTCP_MPJ_SYN,
				      opts->backup, opts->join_id);
		put_unaligned_be32(opts->token, ptr);
		ptr += 1;
		put_unaligned_be32(opts->nonce, ptr);
		ptr += 1;
	}

	if (OPTION_MPTCP_MPJ_SYNACK & opts->suboptions) {
		*ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
				      TCPOLEN_MPTCP_MPJ_SYNACK,
				      opts->backup, opts->join_id);
		put_unaligned_be64(opts->thmac, ptr);
		ptr += 2;
		put_unaligned_be32(opts->nonce, ptr);
		ptr += 1;
	}

	if (OPTION_MPTCP_MPJ_ACK & opts->suboptions) {
		*ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
				      TCPOLEN_MPTCP_MPJ_ACK, 0, 0);
		/* full-length hmac: 20 bytes = five 32-bit words */
		memcpy(ptr, opts->hmac, MPTCPOPT_HMAC_LEN);
		ptr += 5;
	}

	if (opts->ext_copy.use_ack || opts->ext_copy.use_map) {
		struct mptcp_ext *mpext = &opts->ext_copy;
		u8 len = TCPOLEN_MPTCP_DSS_BASE;
		u8 flags = 0;

		/* compute DSS length and flags before writing the header */
		if (mpext->use_ack) {
			flags = MPTCP_DSS_HAS_ACK;
			if (mpext->ack64) {
				len += TCPOLEN_MPTCP_DSS_ACK64;
				flags |= MPTCP_DSS_ACK64;
			} else {
				len += TCPOLEN_MPTCP_DSS_ACK32;
			}
		}

		if (mpext->use_map) {
			len += TCPOLEN_MPTCP_DSS_MAP64;

			/* Use only 64-bit mapping flags for now, add
			 * support for optional 32-bit mappings later.
			 */
			flags |= MPTCP_DSS_HAS_MAP | MPTCP_DSS_DSN64;
			if (mpext->data_fin)
				flags |= MPTCP_DSS_DATA_FIN;
		}

		*ptr++ = mptcp_option(MPTCPOPT_DSS, len, 0, flags);

		if (mpext->use_ack) {
			if (mpext->ack64) {
				put_unaligned_be64(mpext->data_ack, ptr);
				ptr += 2;
			} else {
				put_unaligned_be32(mpext->data_ack32, ptr);
				ptr += 1;
			}
		}

		if (mpext->use_map) {
			/* mapping: data_seq (8) + subflow_seq (4) +
			 * data_len (2) + two NOPs of padding
			 */
			put_unaligned_be64(mpext->data_seq, ptr);
			ptr += 2;
			put_unaligned_be32(mpext->subflow_seq, ptr);
			ptr += 1;
			put_unaligned_be32(mpext->data_len << 16 |
					   TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
		}
	}

	if (tp)
		mptcp_set_rwin(tp);
}