/* SPDX-License-Identifier: GPL-2.0 */
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#ifndef __MPTCP_PROTOCOL_H
#define __MPTCP_PROTOCOL_H

#include <linux/random.h>
#include <net/tcp.h>
#include <net/inet_connection_sock.h>

#define MPTCP_SUPPORTED_VERSION	1

/* MPTCP option bits */
#define OPTION_MPTCP_MPC_SYN	BIT(0)
#define OPTION_MPTCP_MPC_SYNACK	BIT(1)
#define OPTION_MPTCP_MPC_ACK	BIT(2)

/* MPTCP option subtypes */
#define MPTCPOPT_MP_CAPABLE	0
#define MPTCPOPT_MP_JOIN	1
#define MPTCPOPT_DSS		2
#define MPTCPOPT_ADD_ADDR	3
#define MPTCPOPT_RM_ADDR	4
#define MPTCPOPT_MP_PRIO	5
#define MPTCPOPT_MP_FAIL	6
#define MPTCPOPT_MP_FASTCLOSE	7

/* MPTCP suboption lengths */
#define TCPOLEN_MPTCP_MPC_SYN		4
#define TCPOLEN_MPTCP_MPC_SYNACK	12
#define TCPOLEN_MPTCP_MPC_ACK		20
#define TCPOLEN_MPTCP_MPC_ACK_DATA	22
#define TCPOLEN_MPTCP_DSS_BASE		4
#define TCPOLEN_MPTCP_DSS_ACK32		4
#define TCPOLEN_MPTCP_DSS_ACK64		8
#define TCPOLEN_MPTCP_DSS_MAP32		10
#define TCPOLEN_MPTCP_DSS_MAP64		14
#define TCPOLEN_MPTCP_DSS_CHECKSUM	2

/* MPTCP MP_CAPABLE flags */
#define MPTCP_VERSION_MASK	(0x0F)
#define MPTCP_CAP_CHECKSUM_REQD	BIT(7)
#define MPTCP_CAP_EXTENSIBILITY	BIT(6)
#define MPTCP_CAP_HMAC_SHA256	BIT(0)
#define MPTCP_CAP_FLAG_MASK	(0x3F)
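
/* Note on the MP_CAPABLE layout (RFC 8684, section 3.1): the protocol
 * version sits in the low nibble of the subtype/version byte, covered by
 * MPTCP_VERSION_MASK, while the A..H flags occupy the following byte.
 * Bit A (BIT(7)) requests DSS checksums, bit B (BIT(6)) is the
 * extensibility flag and bit H (BIT(0)) selects HMAC-SHA256;
 * MPTCP_CAP_FLAG_MASK covers the remaining C..H flag bits.
 */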

/* MPTCP DSS flags */
#define MPTCP_DSS_DATA_FIN	BIT(4)
#define MPTCP_DSS_DSN64		BIT(3)
#define MPTCP_DSS_HAS_MAP	BIT(2)
#define MPTCP_DSS_ACK64		BIT(1)
#define MPTCP_DSS_HAS_ACK	BIT(0)
#define MPTCP_DSS_FLAG_MASK	(0x1F)
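
/* The DSS flags select which fields follow the 4-byte base header:
 * HAS_ACK/ACK64 add a 32- or 64-bit data ack, HAS_MAP/DSN64 add a 32- or
 * 64-bit mapping (DSN + subflow seq + data-level length), and a checksum
 * may trail the mapping.  As an illustration, a DSS carrying a 64-bit ack
 * plus a 64-bit mapping without checksum takes
 * TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_ACK64 +
 * TCPOLEN_MPTCP_DSS_MAP64 = 4 + 8 + 14 = 26 bytes.
 */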

/* MPTCP socket flags */
#define MPTCP_DATA_READY	0
#define MPTCP_SEND_SPACE	1
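
/* The socket flags above are bit numbers, intended for the atomic bitops
 * (test_bit()/set_bit()/clear_bit()) on the mptcp_sock->flags word.
 */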

/* MPTCP connection sock */
struct mptcp_sock {
	/* inet_connection_sock must be the first member */
	struct inet_connection_sock sk;
	u64		local_key;
	u64		remote_key;
	u64		write_seq;
	u64		ack_seq;
	u32		token;
	unsigned long	flags;
	bool		can_ack;
	struct work_struct work;
	struct list_head conn_list;
	struct skb_ext	*cached_ext;	/* for the next sendmsg */
	struct socket	*subflow; /* outgoing connect/listener/!mp_capable */
	struct sock	*first;
};

#define mptcp_for_each_subflow(__msk, __subflow)			\
	list_for_each_entry(__subflow, &((__msk)->conn_list), node)
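
/* Illustrative use of the iterator above (not part of the original header);
 * conn_list is manipulated under the msk socket lock, so callers are
 * expected to hold it while walking the subflows:
 *
 *	struct mptcp_subflow_context *subflow;
 *
 *	mptcp_for_each_subflow(msk, subflow) {
 *		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 *		...
 *	}
 */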

static inline struct mptcp_sock *mptcp_sk(const struct sock *sk)
{
	return (struct mptcp_sock *)sk;
}
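
/* The cast in mptcp_sk() is valid only because inet_connection_sock is the
 * first member of struct mptcp_sock, mirroring the tcp_sk()/inet_csk()
 * pattern.
 */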

struct mptcp_subflow_request_sock {
	struct	tcp_request_sock sk;
	u16	mp_capable : 1,
		mp_join : 1,
		backup : 1,
		remote_key_valid : 1;
	u64	local_key;
	u64	remote_key;
	u64	idsn;
	u32	token;
	u32	ssn_offset;
};

static inline struct mptcp_subflow_request_sock *
mptcp_subflow_rsk(const struct request_sock *rsk)
{
	return (struct mptcp_subflow_request_sock *)rsk;
}

/* MPTCP subflow context */
struct mptcp_subflow_context {
	struct	list_head node;	/* conn_list of subflows */
	u64	local_key;
	u64	remote_key;
	u64	idsn;
	u64	map_seq;
	u32	snd_isn;
	u32	token;
	u32	rel_write_seq;
	u32	map_subflow_seq;
	u32	ssn_offset;
	u32	map_data_len;
	u32	request_mptcp : 1,	/* send MP_CAPABLE */
		mp_capable : 1,		/* remote is MPTCP capable */
		fully_established : 1,	/* path validated */
		conn_finished : 1,
		map_valid : 1,
		mpc_map : 1,
		data_avail : 1,
		rx_eof : 1,
		data_fin_tx_enable : 1,
		can_ack : 1;		/* only after processing the remote key */
	u64	data_fin_tx_seq;

	struct	sock *tcp_sock;	    /* tcp sk backpointer */
	struct	sock *conn;	    /* parent mptcp_sock */
	const	struct inet_connection_sock_af_ops *icsk_af_ops;
	void	(*tcp_data_ready)(struct sock *sk);
	void	(*tcp_state_change)(struct sock *sk);
	void	(*tcp_write_space)(struct sock *sk);

	struct	rcu_head rcu;
};
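
/* The map_* fields above describe the DSS mapping currently being consumed:
 * map_data_len bytes starting at relative subflow sequence map_subflow_seq
 * (i.e. at absolute subflow sequence ssn_offset + map_subflow_seq) carry
 * the data sequence numbers starting at map_seq.
 */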

static inline struct mptcp_subflow_context *
mptcp_subflow_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code */
	return (__force struct mptcp_subflow_context *)icsk->icsk_ulp_data;
}

static inline struct sock *
mptcp_subflow_tcp_sock(const struct mptcp_subflow_context *subflow)
{
	return subflow->tcp_sock;
}

static inline u64
mptcp_subflow_get_map_offset(const struct mptcp_subflow_context *subflow)
{
	return tcp_sk(mptcp_subflow_tcp_sock(subflow))->copied_seq -
	       subflow->ssn_offset -
	       subflow->map_subflow_seq;
}

static inline u64
mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
{
	return subflow->map_seq + mptcp_subflow_get_map_offset(subflow);
}
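
/* Worked example for the two helpers above (illustrative numbers): with
 * ssn_offset = 1000, map_subflow_seq = 50, map_seq = 5000 and
 * copied_seq = 1070, the offset into the current map is
 * 1070 - 1000 - 50 = 20, so the mapped data sequence number of the next
 * byte to be consumed is 5000 + 20 = 5020.
 */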

int mptcp_is_enabled(struct net *net);
bool mptcp_subflow_data_available(struct sock *sk);
void mptcp_subflow_init(void);
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock);

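/* Fallback to plain TCP: restore the sk callbacks and af_ops that the
 * subflow context saved away when MPTCP took over the socket.
 */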
static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
					      struct mptcp_subflow_context *ctx)
{
	sk->sk_data_ready = ctx->tcp_data_ready;
	sk->sk_state_change = ctx->tcp_state_change;
	sk->sk_write_space = ctx->tcp_write_space;

	inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops;
}

extern const struct inet_connection_sock_af_ops ipv4_specific;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
extern const struct inet_connection_sock_af_ops ipv6_specific;
#endif

void mptcp_proto_init(void);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
int mptcp_proto_v6_init(void);
#endif

struct sock *mptcp_sk_clone(const struct sock *sk, struct request_sock *req);
void mptcp_get_options(const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx);

void mptcp_finish_connect(struct sock *sk);
void mptcp_data_ready(struct sock *sk, struct sock *ssk);

int mptcp_token_new_request(struct request_sock *req);
void mptcp_token_destroy_request(u32 token);
int mptcp_token_new_connect(struct sock *sk);
int mptcp_token_new_accept(u32 token, struct sock *conn);
void mptcp_token_destroy(u32 token);

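/* Key/token/IDSN derivation as specified by RFC 8684: the token is the most
 * significant 32 bits, and the initial data sequence number the least
 * significant 64 bits, of the SHA-256 hash of the 64-bit key.
 */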
void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn);
static inline void mptcp_crypto_key_gen_sha(u64 *key, u32 *token, u64 *idsn)
{
	/* we might consider a faster version that computes the key as a
	 * hash of some information available in the MPTCP socket. Use
	 * random data at the moment, as it's probably the safest option
	 * in case multiple sockets are opened in different namespaces at
	 * the same time.
	 */
	get_random_bytes(key, sizeof(u64));
	mptcp_crypto_key_sha(*key, token, idsn);
}

void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
			   void *hash_out);

static inline struct mptcp_ext *mptcp_get_ext(struct sk_buff *skb)
{
	return (struct mptcp_ext *)skb_ext_find(skb, SKB_EXT_MPTCP);
}

static inline bool before64(__u64 seq1, __u64 seq2)
{
	return (__s64)(seq1 - seq2) < 0;
}

#define after64(seq2, seq1)	before64(seq1, seq2)
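
/* Like before()/after() for 32-bit TCP sequence numbers, these compare
 * 64-bit data sequence numbers modulo 2^64 via a signed difference, so they
 * stay correct across wrap-around: e.g. before64(~0ULL, 1) is true because
 * (__s64)(~0ULL - 1) == -2.
 */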

#endif /* __MPTCP_PROTOCOL_H */