// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <crypto/sha.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

static bool mptcp_cap_flag_sha256(u8 flags)
{
	return (flags & MPTCP_CAP_FLAG_MASK) == MPTCP_CAP_HMAC_SHA256;
}

static void mptcp_parse_option(const struct sk_buff *skb,
			       const unsigned char *ptr, int opsize,
			       struct mptcp_options_received *mp_opt)
{
	u8 subtype = *ptr >> 4;
	int expected_opsize;
	u8 version;
	u8 flags;

	switch (subtype) {
	case MPTCPOPT_MP_CAPABLE:
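		/* The amount of trailing data tells which handshake message
		 * this is: the SYN carries no keys, the SYN/ACK carries the
		 * sender's key, the third ACK carries both keys, and the
		 * first data-bearing ACK additionally carries a
		 * DSS-equivalent data length (see the MPC_ACK_DATA handling
		 * below).
		 */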
		/* strict size checking */
		if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
			if (skb->len > tcp_hdr(skb)->doff << 2)
				expected_opsize = TCPOLEN_MPTCP_MPC_ACK_DATA;
			else
				expected_opsize = TCPOLEN_MPTCP_MPC_ACK;
		} else {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)
				expected_opsize = TCPOLEN_MPTCP_MPC_SYNACK;
			else
				expected_opsize = TCPOLEN_MPTCP_MPC_SYN;
		}
		if (opsize != expected_opsize)
			break;

		/* try to be gentle vs future versions on the initial syn */
		version = *ptr++ & MPTCP_VERSION_MASK;
		if (opsize != TCPOLEN_MPTCP_MPC_SYN) {
			if (version != MPTCP_SUPPORTED_VERSION)
				break;
		} else if (version < MPTCP_SUPPORTED_VERSION) {
			break;
		}

		flags = *ptr++;
		if (!mptcp_cap_flag_sha256(flags) ||
		    (flags & MPTCP_CAP_EXTENSIBILITY))
			break;

		/* RFC 6824, Section 3.1:
		 * "For the Checksum Required bit (labeled "A"), if either
		 * host requires the use of checksums, checksums MUST be used.
		 * In other words, the only way for checksums not to be used
		 * is if both hosts in their SYNs set A=0."
		 *
		 * Section 3.3.0:
		 * "If a checksum is not present when its use has been
		 * negotiated, the receiver MUST close the subflow with a RST as
		 * it is considered broken."
		 *
		 * We don't implement DSS checksum - fall back to TCP.
		 */
		if (flags & MPTCP_CAP_CHECKSUM_REQD)
			break;

		mp_opt->mp_capable = 1;
		if (opsize >= TCPOLEN_MPTCP_MPC_SYNACK) {
			mp_opt->sndr_key = get_unaligned_be64(ptr);
			ptr += 8;
		}
		if (opsize >= TCPOLEN_MPTCP_MPC_ACK) {
			mp_opt->rcvr_key = get_unaligned_be64(ptr);
			ptr += 8;
		}
		if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA) {
			/* Section 3.1.:
			 * "the data parameters in a MP_CAPABLE are semantically
			 * equivalent to those in a DSS option and can be used
			 * interchangeably."
			 */
			mp_opt->dss = 1;
			mp_opt->use_map = 1;
			mp_opt->mpc_map = 1;
			mp_opt->data_len = get_unaligned_be16(ptr);
			ptr += 2;
		}
		pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d",
			 version, flags, opsize, mp_opt->sndr_key,
			 mp_opt->rcvr_key, mp_opt->data_len);
		break;

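	/* MP_JOIN comes in three sizes: the SYN carries the peer's token and
	 * the sender's random nonce, the SYN/ACK carries a truncated HMAC
	 * (thmac) plus a nonce, and the third ACK carries the longer HMAC
	 * (MPTCPOPT_HMAC_LEN bytes). Any other size is rejected below.
	 */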
	case MPTCPOPT_MP_JOIN:
		mp_opt->mp_join = 1;
		if (opsize == TCPOLEN_MPTCP_MPJ_SYN) {
			mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
			mp_opt->join_id = *ptr++;
			mp_opt->token = get_unaligned_be32(ptr);
			ptr += 4;
			mp_opt->nonce = get_unaligned_be32(ptr);
			ptr += 4;
			pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
				 mp_opt->backup, mp_opt->join_id,
				 mp_opt->token, mp_opt->nonce);
		} else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
			mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
			mp_opt->join_id = *ptr++;
			mp_opt->thmac = get_unaligned_be64(ptr);
			ptr += 8;
			mp_opt->nonce = get_unaligned_be32(ptr);
			ptr += 4;
			pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
				 mp_opt->backup, mp_opt->join_id,
				 mp_opt->thmac, mp_opt->nonce);
		} else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
			ptr += 2;
			memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
			pr_debug("MP_JOIN hmac");
		} else {
			pr_warn("MP_JOIN bad option size");
			mp_opt->mp_join = 0;
		}
		break;

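	/* A DSS option has a variable layout: the flags byte announces which
	 * of the data ack (32 or 64 bit) and the data sequence mapping
	 * (DSN, subflow sequence, data length) are present, so the expected
	 * option size is recomputed from the flags before the fields are
	 * pulled off the wire.
	 */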
	case MPTCPOPT_DSS:
		pr_debug("DSS");
		ptr++;

		/* we must clear 'mpc_map' to be able to detect MP_CAPABLE
		 * map vs DSS map in mptcp_incoming_options(), and reconstruct
		 * map info accordingly
		 */
		mp_opt->mpc_map = 0;
		flags = (*ptr++) & MPTCP_DSS_FLAG_MASK;
		mp_opt->data_fin = (flags & MPTCP_DSS_DATA_FIN) != 0;
		mp_opt->dsn64 = (flags & MPTCP_DSS_DSN64) != 0;
		mp_opt->use_map = (flags & MPTCP_DSS_HAS_MAP) != 0;
		mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
		mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);

		pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
			 mp_opt->data_fin, mp_opt->dsn64,
			 mp_opt->use_map, mp_opt->ack64,
			 mp_opt->use_ack);

		expected_opsize = TCPOLEN_MPTCP_DSS_BASE;

		if (mp_opt->use_ack) {
			if (mp_opt->ack64)
				expected_opsize += TCPOLEN_MPTCP_DSS_ACK64;
			else
				expected_opsize += TCPOLEN_MPTCP_DSS_ACK32;
		}

		if (mp_opt->use_map) {
			if (mp_opt->dsn64)
				expected_opsize += TCPOLEN_MPTCP_DSS_MAP64;
			else
				expected_opsize += TCPOLEN_MPTCP_DSS_MAP32;
		}

		/* RFC 6824, Section 3.3:
		 * If a checksum is present, but its use had
		 * not been negotiated in the MP_CAPABLE handshake,
		 * the checksum field MUST be ignored.
		 */
		if (opsize != expected_opsize &&
		    opsize != expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM)
			break;

		mp_opt->dss = 1;

		if (mp_opt->use_ack) {
			if (mp_opt->ack64) {
				mp_opt->data_ack = get_unaligned_be64(ptr);
				ptr += 8;
			} else {
				mp_opt->data_ack = get_unaligned_be32(ptr);
				ptr += 4;
			}

			pr_debug("data_ack=%llu", mp_opt->data_ack);
		}

		if (mp_opt->use_map) {
			if (mp_opt->dsn64) {
				mp_opt->data_seq = get_unaligned_be64(ptr);
				ptr += 8;
			} else {
				mp_opt->data_seq = get_unaligned_be32(ptr);
				ptr += 4;
			}

			mp_opt->subflow_seq = get_unaligned_be32(ptr);
			ptr += 4;

			mp_opt->data_len = get_unaligned_be16(ptr);
			ptr += 2;

			pr_debug("data_seq=%llu subflow_seq=%u data_len=%u",
				 mp_opt->data_seq, mp_opt->subflow_seq,
				 mp_opt->data_len);
		}

		break;

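	/* ADD_ADDR comes in several sizes: IPv4 or IPv6, with or without a
	 * port, and either a fresh announcement (which carries a truncated
	 * HMAC of the address) or an echo of one we sent. The option size is
	 * used to tell the variants apart before the address is copied out.
	 */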
	case MPTCPOPT_ADD_ADDR:
		mp_opt->echo = (*ptr++) & MPTCP_ADDR_ECHO;
		if (!mp_opt->echo) {
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT)
				mp_opt->family = MPTCP_ADDR_IPVERSION_4;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
			else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6 ||
				 opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT)
				mp_opt->family = MPTCP_ADDR_IPVERSION_6;
#endif
			else
				break;
		} else {
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT)
				mp_opt->family = MPTCP_ADDR_IPVERSION_4;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
			else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE ||
				 opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT)
				mp_opt->family = MPTCP_ADDR_IPVERSION_6;
#endif
			else
				break;
		}

		mp_opt->add_addr = 1;
		mp_opt->addr_id = *ptr++;
		pr_debug("ADD_ADDR: id=%d, echo=%d", mp_opt->addr_id, mp_opt->echo);
		if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) {
			memcpy((u8 *)&mp_opt->addr.s_addr, (u8 *)ptr, 4);
			ptr += 4;
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT) {
				mp_opt->port = get_unaligned_be16(ptr);
				ptr += 2;
			}
		}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else {
			memcpy(mp_opt->addr6.s6_addr, (u8 *)ptr, 16);
			ptr += 16;
			if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT ||
			    opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT) {
				mp_opt->port = get_unaligned_be16(ptr);
				ptr += 2;
			}
		}
#endif
		if (!mp_opt->echo) {
			mp_opt->ahmac = get_unaligned_be64(ptr);
			ptr += 8;
		}
		break;

	case MPTCPOPT_RM_ADDR:
		if (opsize != TCPOLEN_MPTCP_RM_ADDR_BASE)
			break;

		ptr++;

		mp_opt->rm_addr = 1;
		mp_opt->rm_id = *ptr++;
		pr_debug("RM_ADDR: id=%d", mp_opt->rm_id);
		break;

	default:
		break;
	}
}

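/* Walk the TCP option space of @skb (kind, length, value triplets per
 * RFC 793) and fill @mp_opt from any MPTCP option found; all result fields
 * are reset first so callers see a clean state even when no MPTCP option is
 * present.
 */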
void mptcp_get_options(const struct sk_buff *skb,
		       struct mptcp_options_received *mp_opt)
{
	const struct tcphdr *th = tcp_hdr(skb);
	const unsigned char *ptr;
	int length;

	/* initialize option status */
	mp_opt->mp_capable = 0;
	mp_opt->mp_join = 0;
	mp_opt->add_addr = 0;
	mp_opt->ahmac = 0;
	mp_opt->port = 0;
	mp_opt->rm_addr = 0;
	mp_opt->dss = 0;

	length = (th->doff * 4) - sizeof(struct tcphdr);
	ptr = (const unsigned char *)(th + 1);

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2) /* "silly options" */
				return;
			if (opsize > length)
				return;	/* don't parse partial options */
			if (opcode == TCPOPT_MPTCP)
				mptcp_parse_option(skb, ptr, opsize, mp_opt);
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}

bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
		       unsigned int *size, struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* we will use snd_isn to detect first pkt [re]transmission
	 * in mptcp_established_options_mp()
	 */
	subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
	if (subflow->request_mptcp) {
		opts->suboptions = OPTION_MPTCP_MPC_SYN;
		*size = TCPOLEN_MPTCP_MPC_SYN;
		return true;
	} else if (subflow->request_join) {
		pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
			 subflow->local_nonce);
		opts->suboptions = OPTION_MPTCP_MPJ_SYN;
		opts->join_id = subflow->local_id;
		opts->token = subflow->remote_token;
		opts->nonce = subflow->local_nonce;
		opts->backup = subflow->request_bkup;
		*size = TCPOLEN_MPTCP_MPJ_SYN;
		return true;
	}
	return false;
}

/* MP_JOIN client subflow must wait for 4th ack before sending any data:
 * TCP can't schedule delack timer before the subflow is fully established.
 * MPTCP uses the delack timer to do 3rd ack retransmissions
 */
static void schedule_3rdack_retransmission(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long timeout;

	/* reschedule with a timeout above RTT, as we must look only for drop */
	if (tp->srtt_us)
		timeout = tp->srtt_us << 1;
	else
		timeout = TCP_TIMEOUT_INIT;

	WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

static void clear_3rdack_retransmission(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	icsk->icsk_ack.timeout = 0;
	icsk->icsk_ack.ato = 0;
	icsk->icsk_ack.pending &= ~(ICSK_ACK_SCHED | ICSK_ACK_TIMER);
}

static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
					 unsigned int *size,
					 unsigned int remaining,
					 struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_ext *mpext;
	unsigned int data_len;

	/* When skb is not available, we better over-estimate the emitted
	 * options len. A full DSS option (28 bytes) is longer than
	 * TCPOLEN_MPTCP_MPC_ACK_DATA(22) or TCPOLEN_MPTCP_MPJ_ACK(24), so
	 * tell the caller to defer the estimate to
	 * mptcp_established_options_dss(), which will reserve enough space.
	 */
	if (!skb)
		return false;

	/* MPC/MPJ needed only on 3rd ack packet */
	if (subflow->fully_established ||
	    subflow->snd_isn != TCP_SKB_CB(skb)->seq)
		return false;

	if (subflow->mp_capable) {
		mpext = mptcp_get_ext(skb);
		data_len = mpext ? mpext->data_len : 0;

		/* we will check ext_copy.data_len in mptcp_write_options() to
		 * discriminate between TCPOLEN_MPTCP_MPC_ACK_DATA and
		 * TCPOLEN_MPTCP_MPC_ACK
		 */
		opts->ext_copy.data_len = data_len;
		opts->suboptions = OPTION_MPTCP_MPC_ACK;
		opts->sndr_key = subflow->local_key;
		opts->rcvr_key = subflow->remote_key;

		/* Section 3.1.
		 * The MP_CAPABLE option is carried on the SYN, SYN/ACK, and ACK
		 * packets that start the first subflow of an MPTCP connection,
		 * as well as the first packet that carries data
		 */
		if (data_len > 0)
			*size = ALIGN(TCPOLEN_MPTCP_MPC_ACK_DATA, 4);
		else
			*size = TCPOLEN_MPTCP_MPC_ACK;

		pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
			 subflow, subflow->local_key, subflow->remote_key,
			 data_len);

		return true;
	} else if (subflow->mp_join) {
		opts->suboptions = OPTION_MPTCP_MPJ_ACK;
		memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
		*size = TCPOLEN_MPTCP_MPJ_ACK;
		pr_debug("subflow=%p", subflow);

		schedule_3rdack_retransmission(sk);
		return true;
	}
	return false;
}

static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
				 struct sk_buff *skb, struct mptcp_ext *ext)
{
	/* The write_seq value has already been incremented, so the actual
	 * sequence number for the DATA_FIN is one less.
	 */
	u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq) - 1;

	if (!ext->use_map || !skb->len) {
		/* RFC 6824 requires a DSS mapping with specific values
		 * if DATA_FIN is set but no data payload is mapped
		 */
		ext->data_fin = 1;
		ext->use_map = 1;
		ext->dsn64 = 1;
		ext->data_seq = data_fin_tx_seq;
		ext->subflow_seq = 0;
		ext->data_len = 1;
	} else if (ext->data_seq + ext->data_len == data_fin_tx_seq) {
		/* If there's an existing DSS mapping and it is the
		 * final mapping, DATA_FIN consumes 1 additional byte of
		 * mapping space.
		 */
		ext->data_fin = 1;
		ext->data_len++;
	}
}

static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
					  unsigned int *size,
					  unsigned int remaining,
					  struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	unsigned int dss_size = 0;
	u64 snd_data_fin_enable;
	struct mptcp_ext *mpext;
	unsigned int ack_size;
	bool ret = false;

	mpext = skb ? mptcp_get_ext(skb) : NULL;
	snd_data_fin_enable = READ_ONCE(msk->snd_data_fin_enable);

	if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) {
		unsigned int map_size;

		map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64;

		remaining -= map_size;
		dss_size = map_size;
		if (mpext)
			opts->ext_copy = *mpext;

		if (skb && snd_data_fin_enable)
			mptcp_write_data_fin(subflow, skb, &opts->ext_copy);
		ret = true;
	}

	/* passive sockets msk will set the 'can_ack' after accept(), even
	 * if the first subflow may already have the remote key handy
	 */
	opts->ext_copy.use_ack = 0;
	if (!READ_ONCE(msk->can_ack)) {
		*size = ALIGN(dss_size, 4);
		return ret;
	}

	if (READ_ONCE(msk->use_64bit_ack)) {
		ack_size = TCPOLEN_MPTCP_DSS_ACK64;
		opts->ext_copy.data_ack = READ_ONCE(msk->ack_seq);
		opts->ext_copy.ack64 = 1;
	} else {
		ack_size = TCPOLEN_MPTCP_DSS_ACK32;
		opts->ext_copy.data_ack32 = (uint32_t)READ_ONCE(msk->ack_seq);
		opts->ext_copy.ack64 = 0;
	}
	opts->ext_copy.use_ack = 1;

	/* Add kind/length/subtype/flag overhead if mapping is not populated */
	if (dss_size == 0)
		ack_size += TCPOLEN_MPTCP_DSS_BASE;

	dss_size += ack_size;

	*size = ALIGN(dss_size, 4);
	return true;
}

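/* Build the ADD_ADDR HMAC over a small message: the address id, the raw
 * address bytes and two trailing zero bytes (presumably the port field,
 * which is not signalled in this version). Only 64 bits of the digest - the
 * trailing eight bytes of the buffer filled by mptcp_crypto_hmac_sha() -
 * end up in the option.
 */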
static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id,
				  struct in_addr *addr)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u8 msg[7];

	msg[0] = addr_id;
	memcpy(&msg[1], &addr->s_addr, 4);
	msg[5] = 0;
	msg[6] = 0;

	mptcp_crypto_hmac_sha(key1, key2, msg, 7, hmac);

	return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id,
				   struct in6_addr *addr)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u8 msg[19];

	msg[0] = addr_id;
	memcpy(&msg[1], &addr->s6_addr, 16);
	msg[17] = 0;
	msg[18] = 0;

	mptcp_crypto_hmac_sha(key1, key2, msg, 19, hmac);

	return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]);
}
#endif

static bool mptcp_established_options_add_addr(struct sock *sk,
					       unsigned int *size,
					       unsigned int remaining,
					       struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_addr_info saddr;
	bool echo;
	int len;

	if (!mptcp_pm_should_add_signal(msk) ||
	    !(mptcp_pm_add_addr_signal(msk, remaining, &saddr, &echo)))
		return false;

	len = mptcp_add_addr_len(saddr.family, echo);
	if (remaining < len)
		return false;

	*size = len;
	opts->addr_id = saddr.id;
	if (saddr.family == AF_INET) {
		opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
		opts->addr = saddr.addr;
		if (!echo) {
			opts->ahmac = add_addr_generate_hmac(msk->local_key,
							     msk->remote_key,
							     opts->addr_id,
							     &opts->addr);
		}
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (saddr.family == AF_INET6) {
		opts->suboptions |= OPTION_MPTCP_ADD_ADDR6;
		opts->addr6 = saddr.addr6;
		if (!echo) {
			opts->ahmac = add_addr6_generate_hmac(msk->local_key,
							      msk->remote_key,
							      opts->addr_id,
							      &opts->addr6);
		}
	}
#endif
	pr_debug("addr_id=%d, ahmac=%llu, echo=%d", opts->addr_id, opts->ahmac, echo);

	return true;
}

static bool mptcp_established_options_rm_addr(struct sock *sk,
					      unsigned int *size,
					      unsigned int remaining,
					      struct mptcp_out_options *opts)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	u8 rm_id;

	if (!mptcp_pm_should_rm_signal(msk) ||
	    !(mptcp_pm_rm_addr_signal(msk, remaining, &rm_id)))
		return false;

	if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
		return false;

	*size = TCPOLEN_MPTCP_RM_ADDR_BASE;
	opts->suboptions |= OPTION_MPTCP_RM_ADDR;
	opts->rm_id = rm_id;

	pr_debug("rm_id=%d", opts->rm_id);

	return true;
}

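/* Central option-budget helper for established subflows: first try the
 * handshake-completing MPC/MPJ ACK options, otherwise fall back to DSS;
 * whatever room is left in the TCP option space may then carry an ADD_ADDR
 * announcement or, failing that, an RM_ADDR.
 */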
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
			       unsigned int *size, unsigned int remaining,
			       struct mptcp_out_options *opts)
{
	unsigned int opt_size = 0;
	bool ret = false;

	opts->suboptions = 0;

	if (unlikely(mptcp_check_fallback(sk)))
		return false;

	/* prevent adding any MPTCP-related options on reset packets
	 * until we support MP_TCPRST/MP_FASTCLOSE
	 */
	if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST))
		return false;

	if (mptcp_established_options_mp(sk, skb, &opt_size, remaining, opts))
		ret = true;
	else if (mptcp_established_options_dss(sk, skb, &opt_size, remaining,
					       opts))
		ret = true;

	/* we reserved enough space for the above options, and exceeding the
	 * TCP option space would be fatal
	 */
	if (WARN_ON_ONCE(opt_size > remaining))
		return false;

	*size += opt_size;
	remaining -= opt_size;
	if (mptcp_established_options_add_addr(sk, &opt_size, remaining, opts)) {
		*size += opt_size;
		remaining -= opt_size;
		ret = true;
	} else if (mptcp_established_options_rm_addr(sk, &opt_size, remaining, opts)) {
		*size += opt_size;
		remaining -= opt_size;
		ret = true;
	}

	return ret;
}

bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
			  struct mptcp_out_options *opts)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	if (subflow_req->mp_capable) {
		opts->suboptions = OPTION_MPTCP_MPC_SYNACK;
		opts->sndr_key = subflow_req->local_key;
		*size = TCPOLEN_MPTCP_MPC_SYNACK;
		pr_debug("subflow_req=%p, local_key=%llu",
			 subflow_req, subflow_req->local_key);
		return true;
	} else if (subflow_req->mp_join) {
		opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
		opts->backup = subflow_req->backup;
		opts->join_id = subflow_req->local_id;
		opts->thmac = subflow_req->thmac;
		opts->nonce = subflow_req->local_nonce;
		pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
			 subflow_req, opts->backup, opts->join_id,
			 opts->thmac, opts->nonce);
		*size = TCPOLEN_MPTCP_MPJ_SYNACK;
		return true;
	}
	return false;
}

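/* Move the subflow to the fully-established state once the handshake
 * completing ack (3rd ack for MP_CAPABLE, 4th ack for MP_JOIN) has been
 * observed; handle retransmitted 3rd acks, fall back to plain TCP when the
 * expected MP_CAPABLE is missing, and reset MP_JOIN subflows on protocol
 * violations.
 */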
static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
				    struct mptcp_subflow_context *subflow,
				    struct sk_buff *skb,
				    struct mptcp_options_received *mp_opt)
{
	/* here we can process OoO, in-window pkts; only the in-sequence 4th ack
	 * will make the subflow fully established
	 */
	if (likely(subflow->fully_established)) {
		/* on passive sockets, check for 3rd ack retransmission;
		 * note that msk is always set by subflow_syn_recv_sock()
		 * for mp_join subflows
		 */
		if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 &&
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
		    subflow->mp_join && mp_opt->mp_join &&
		    READ_ONCE(msk->pm.server_side))
			tcp_send_ack(ssk);
		goto fully_established;
	}

	/* we must process OoO packets before the first subflow is fully
	 * established. OoO packets are instead a protocol violation
	 * for MP_JOIN subflows as the peer must not send any data
	 * before receiving the fourth ack - cfr. RFC 8684 section 3.2.
	 */
	if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) {
		if (subflow->mp_join)
			goto reset;
		return subflow->mp_capable;
	}

	if (mp_opt->dss && mp_opt->use_ack) {
		/* subflows are fully established as soon as we get any
		 * additional ack.
		 */
		subflow->fully_established = 1;
		WRITE_ONCE(msk->fully_established, true);
		goto fully_established;
	}

	/* If the first established packet does not contain MP_CAPABLE + data
	 * then fallback to TCP. Fallback scenarios require a reset for
	 * MP_JOIN subflows.
	 */
	if (!mp_opt->mp_capable) {
		if (subflow->mp_join)
			goto reset;
		subflow->mp_capable = 0;
		pr_fallback(msk);
		__mptcp_do_fallback(msk);
		return false;
	}

	if (unlikely(!READ_ONCE(msk->pm.server_side)))
		pr_warn_once("bogus mpc option on established client sk");
	mptcp_subflow_fully_established(subflow, mp_opt);

fully_established:
	if (likely(subflow->pm_notified))
		return true;

	subflow->pm_notified = 1;
	if (subflow->mp_join) {
		clear_3rdack_retransmission(ssk);
		mptcp_pm_subflow_established(msk, subflow);
	} else {
		mptcp_pm_fully_established(msk);
	}
	return true;

reset:
	mptcp_subflow_reset(ssk);
	return false;
}

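/* Expand a 32-bit DSS ack into 64-bit space by borrowing the upper 32 bits
 * of the last known data-ack; an extra 1 << 32 is added whenever the new
 * 32-bit value compares as "before" the old one. Expansions that overshoot
 * what was actually sent are discarded by update_una() below.
 */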
static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
{
	u32 old_ack32, cur_ack32;

	if (use_64bit)
		return cur_ack;

	old_ack32 = (u32)old_ack;
	cur_ack32 = (u32)cur_ack;
	cur_ack = (old_ack & GENMASK_ULL(63, 32)) + cur_ack32;
	if (unlikely(before(cur_ack32, old_ack32)))
		return cur_ack + (1LL << 32);
	return cur_ack;
}

static void update_una(struct mptcp_sock *msk,
		       struct mptcp_options_received *mp_opt)
{
	u64 new_snd_una, snd_una, old_snd_una = atomic64_read(&msk->snd_una);
	u64 write_seq = READ_ONCE(msk->write_seq);

	/* avoid ack expansion on update conflict, to reduce the risk of
	 * wrongly expanding to a future ack sequence number, which is way
	 * more dangerous than missing an ack
	 */
	new_snd_una = expand_ack(old_snd_una, mp_opt->data_ack, mp_opt->ack64);

	/* ACK for data not even sent yet? Ignore. */
	if (after64(new_snd_una, write_seq))
		new_snd_una = old_snd_una;

	while (after64(new_snd_una, old_snd_una)) {
		snd_una = old_snd_una;
		old_snd_una = atomic64_cmpxchg(&msk->snd_una, snd_una,
					       new_snd_una);
		if (old_snd_una == snd_una) {
			mptcp_data_acked((struct sock *)msk);
			break;
		}
	}
}

bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit)
{
	/* Skip if DATA_FIN was already received.
	 * If updating simultaneously with the recvmsg loop, values
	 * should match. If they mismatch, the peer is misbehaving and
	 * we will prefer the most recent information.
	 */
	if (READ_ONCE(msk->rcv_data_fin) || !READ_ONCE(msk->first))
		return false;

	WRITE_ONCE(msk->rcv_data_fin_seq,
		   expand_ack(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit));
	WRITE_ONCE(msk->rcv_data_fin, 1);

	return true;
}

static bool add_addr_hmac_valid(struct mptcp_sock *msk,
				struct mptcp_options_received *mp_opt)
{
	u64 hmac = 0;

	if (mp_opt->echo)
		return true;

	if (mp_opt->family == MPTCP_ADDR_IPVERSION_4)
		hmac = add_addr_generate_hmac(msk->remote_key,
					      msk->local_key,
					      mp_opt->addr_id, &mp_opt->addr);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else
		hmac = add_addr6_generate_hmac(msk->remote_key,
					       msk->local_key,
					       mp_opt->addr_id, &mp_opt->addr6);
#endif

	pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
		 msk, (unsigned long long)hmac,
		 (unsigned long long)mp_opt->ahmac);

	return hmac == mp_opt->ahmac;
}

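/* Receive-path entry point: parse the MPTCP options carried by @skb, feed
 * ADD_ADDR/RM_ADDR events to the path manager, move snd_una forward from
 * the DSS ack, and attach the DSS mapping to the skb as an MPTCP extension
 * for the data path to consume.
 */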
void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_options_received mp_opt;
	struct mptcp_ext *mpext;

	if (__mptcp_check_fallback(msk))
		return;

	mptcp_get_options(skb, &mp_opt);
	if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
		return;

	if (mp_opt.add_addr && add_addr_hmac_valid(msk, &mp_opt)) {
		struct mptcp_addr_info addr;

		addr.port = htons(mp_opt.port);
		addr.id = mp_opt.addr_id;
		if (mp_opt.family == MPTCP_ADDR_IPVERSION_4) {
			addr.family = AF_INET;
			addr.addr = mp_opt.addr;
		}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else if (mp_opt.family == MPTCP_ADDR_IPVERSION_6) {
			addr.family = AF_INET6;
			addr.addr6 = mp_opt.addr6;
		}
#endif
		if (!mp_opt.echo) {
			mptcp_pm_add_addr_received(msk, &addr);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR);
		} else {
			mptcp_pm_del_add_timer(msk, &addr);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD);
		}
		mp_opt.add_addr = 0;
	}

	if (mp_opt.rm_addr) {
		mptcp_pm_rm_addr_received(msk, mp_opt.rm_id);
		mp_opt.rm_addr = 0;
	}

	if (!mp_opt.dss)
		return;

	/* we can't wait for recvmsg() to update the ack_seq, otherwise
	 * monodirectional flows would get stuck
	 */
	if (mp_opt.use_ack)
		update_una(msk, &mp_opt);

	/* Zero-data-length packets are dropped by the caller and not
	 * propagated to the MPTCP layer, so the skb extension does not
	 * need to be allocated or populated. DATA_FIN information, if
	 * present, needs to be updated here before the skb is freed.
	 */
	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
		if (mp_opt.data_fin && mp_opt.data_len == 1 &&
		    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) &&
		    schedule_work(&msk->work))
			sock_hold(subflow->conn);

		return;
	}

	mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
	if (!mpext)
		return;

	memset(mpext, 0, sizeof(*mpext));

	if (mp_opt.use_map) {
		if (mp_opt.mpc_map) {
			/* this is an MP_CAPABLE carrying MPTCP data;
			 * we know this maps the first chunk of data
			 */
			mptcp_crypto_key_sha(subflow->remote_key, NULL,
					     &mpext->data_seq);
			mpext->data_seq++;
			mpext->subflow_seq = 1;
			mpext->dsn64 = 1;
			mpext->mpc_map = 1;
			mpext->data_fin = 0;
		} else {
			mpext->data_seq = mp_opt.data_seq;
			mpext->subflow_seq = mp_opt.subflow_seq;
			mpext->dsn64 = mp_opt.dsn64;
			mpext->data_fin = mp_opt.data_fin;
		}
		mpext->data_len = mp_opt.data_len;
		mpext->use_map = 1;
	}
}

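/* Serialize the options selected above into the TCP option space. @ptr is a
 * __be32 cursor, so every "ptr += n" below advances by n 32-bit words:
 * 64-bit fields move it by 2, 32-bit fields by 1, and the MPTCPOPT_HMAC_LEN
 * byte MP_JOIN HMAC by 5.
 */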
void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts)
{
	if ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK |
	     OPTION_MPTCP_MPC_ACK) & opts->suboptions) {
		u8 len;

		if (OPTION_MPTCP_MPC_SYN & opts->suboptions)
			len = TCPOLEN_MPTCP_MPC_SYN;
		else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions)
			len = TCPOLEN_MPTCP_MPC_SYNACK;
		else if (opts->ext_copy.data_len)
			len = TCPOLEN_MPTCP_MPC_ACK_DATA;
		else
			len = TCPOLEN_MPTCP_MPC_ACK;

		*ptr++ = mptcp_option(MPTCPOPT_MP_CAPABLE, len,
				      MPTCP_SUPPORTED_VERSION,
				      MPTCP_CAP_HMAC_SHA256);

		if (!((OPTION_MPTCP_MPC_SYNACK | OPTION_MPTCP_MPC_ACK) &
		      opts->suboptions))
			goto mp_capable_done;

		put_unaligned_be64(opts->sndr_key, ptr);
		ptr += 2;
		if (!((OPTION_MPTCP_MPC_ACK) & opts->suboptions))
			goto mp_capable_done;

		put_unaligned_be64(opts->rcvr_key, ptr);
		ptr += 2;
		if (!opts->ext_copy.data_len)
			goto mp_capable_done;

		put_unaligned_be32(opts->ext_copy.data_len << 16 |
				   TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
		ptr += 1;
	}

mp_capable_done:
	if (OPTION_MPTCP_ADD_ADDR & opts->suboptions) {
		if (opts->ahmac)
			*ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
					      TCPOLEN_MPTCP_ADD_ADDR, 0,
					      opts->addr_id);
		else
			*ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
					      TCPOLEN_MPTCP_ADD_ADDR_BASE,
					      MPTCP_ADDR_ECHO,
					      opts->addr_id);
		memcpy((u8 *)ptr, (u8 *)&opts->addr.s_addr, 4);
		ptr += 1;
		if (opts->ahmac) {
			put_unaligned_be64(opts->ahmac, ptr);
			ptr += 2;
		}
	}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (OPTION_MPTCP_ADD_ADDR6 & opts->suboptions) {
		if (opts->ahmac)
			*ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
					      TCPOLEN_MPTCP_ADD_ADDR6, 0,
					      opts->addr_id);
		else
			*ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
					      TCPOLEN_MPTCP_ADD_ADDR6_BASE,
					      MPTCP_ADDR_ECHO,
					      opts->addr_id);
		memcpy((u8 *)ptr, opts->addr6.s6_addr, 16);
		ptr += 4;
		if (opts->ahmac) {
			put_unaligned_be64(opts->ahmac, ptr);
			ptr += 2;
		}
	}
#endif

	if (OPTION_MPTCP_RM_ADDR & opts->suboptions) {
		*ptr++ = mptcp_option(MPTCPOPT_RM_ADDR,
				      TCPOLEN_MPTCP_RM_ADDR_BASE,
				      0, opts->rm_id);
	}

	if (OPTION_MPTCP_MPJ_SYN & opts->suboptions) {
		*ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
				      TCPOLEN_MPTCP_MPJ_SYN,
				      opts->backup, opts->join_id);
		put_unaligned_be32(opts->token, ptr);
		ptr += 1;
		put_unaligned_be32(opts->nonce, ptr);
		ptr += 1;
	}

	if (OPTION_MPTCP_MPJ_SYNACK & opts->suboptions) {
		*ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
				      TCPOLEN_MPTCP_MPJ_SYNACK,
				      opts->backup, opts->join_id);
		put_unaligned_be64(opts->thmac, ptr);
		ptr += 2;
		put_unaligned_be32(opts->nonce, ptr);
		ptr += 1;
	}

	if (OPTION_MPTCP_MPJ_ACK & opts->suboptions) {
		*ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
				      TCPOLEN_MPTCP_MPJ_ACK, 0, 0);
		memcpy(ptr, opts->hmac, MPTCPOPT_HMAC_LEN);
		ptr += 5;
	}

	if (opts->ext_copy.use_ack || opts->ext_copy.use_map) {
		struct mptcp_ext *mpext = &opts->ext_copy;
		u8 len = TCPOLEN_MPTCP_DSS_BASE;
		u8 flags = 0;

		if (mpext->use_ack) {
			flags = MPTCP_DSS_HAS_ACK;
			if (mpext->ack64) {
				len += TCPOLEN_MPTCP_DSS_ACK64;
				flags |= MPTCP_DSS_ACK64;
			} else {
				len += TCPOLEN_MPTCP_DSS_ACK32;
			}
		}

		if (mpext->use_map) {
			len += TCPOLEN_MPTCP_DSS_MAP64;

			/* Use only 64-bit mapping flags for now, add
			 * support for optional 32-bit mappings later.
			 */
			flags |= MPTCP_DSS_HAS_MAP | MPTCP_DSS_DSN64;
			if (mpext->data_fin)
				flags |= MPTCP_DSS_DATA_FIN;
		}

		*ptr++ = mptcp_option(MPTCPOPT_DSS, len, 0, flags);

		if (mpext->use_ack) {
			if (mpext->ack64) {
				put_unaligned_be64(mpext->data_ack, ptr);
				ptr += 2;
			} else {
				put_unaligned_be32(mpext->data_ack32, ptr);
				ptr += 1;
			}
		}

		if (mpext->use_map) {
			put_unaligned_be64(mpext->data_seq, ptr);
			ptr += 2;
			put_unaligned_be32(mpext->subflow_seq, ptr);
			ptr += 1;
			put_unaligned_be32(mpext->data_len << 16 |
					   TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
		}
	}
}