// SPDX-License-Identifier: GPL-2.0
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

int sysctl_tcp_fastopen __read_mostly = TFO_CLIENT_ENABLE;

struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;

static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
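
/* Note on @publish (a reading of the code below, not a documented contract):
 * with publish == false, only the one-shot random draw is consumed and the
 * caller is expected to install its own key via tcp_fastopen_reset_cipher();
 * with publish == true, the freshly drawn random key is installed here.
 */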
void tcp_fastopen_init_key_once(bool publish)
{
        static u8 key[TCP_FASTOPEN_KEY_LENGTH];

        /* tcp_fastopen_reset_cipher publishes the new context
         * atomically, so we allow this race to happen here.
         *
         * All call sites of tcp_fastopen_cookie_gen also check
         * for a valid cookie, so this is an acceptable risk.
         */
        if (net_get_random_once(key, sizeof(key)) && publish)
                tcp_fastopen_reset_cipher(key, sizeof(key));
}

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
        struct tcp_fastopen_context *ctx =
                container_of(head, struct tcp_fastopen_context, rcu);
        crypto_free_cipher(ctx->tfm);
        kfree(ctx);
}

int tcp_fastopen_reset_cipher(void *key, unsigned int len)
{
        int err;
        struct tcp_fastopen_context *ctx, *octx;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

        if (IS_ERR(ctx->tfm)) {
                err = PTR_ERR(ctx->tfm);
error:
                kfree(ctx);
                pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
                return err;
        }
        err = crypto_cipher_setkey(ctx->tfm, key, len);
        if (err) {
                pr_err("TCP: TFO cipher key error: %d\n", err);
                crypto_free_cipher(ctx->tfm);
                goto error;
        }
        memcpy(ctx->key, key, len);

        spin_lock(&tcp_fastopen_ctx_lock);

        octx = rcu_dereference_protected(tcp_fastopen_ctx,
                                lockdep_is_held(&tcp_fastopen_ctx_lock));
        rcu_assign_pointer(tcp_fastopen_ctx, ctx);
        spin_unlock(&tcp_fastopen_ctx_lock);

        if (octx)
                call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
        return err;
}
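
/* For reference: in this kernel the usual way to install a key from
 * userspace is the net.ipv4.tcp_fastopen_key sysctl, whose handler ends up
 * calling tcp_fastopen_reset_cipher(), e.g.:
 *
 *      echo 00112233-44556677-8899aabb-ccddeeff > \
 *              /proc/sys/net/ipv4/tcp_fastopen_key
 */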

static bool __tcp_fastopen_cookie_gen(const void *path,
                                      struct tcp_fastopen_cookie *foc)
{
        struct tcp_fastopen_context *ctx;
        bool ok = false;

        rcu_read_lock();
        ctx = rcu_dereference(tcp_fastopen_ctx);
        if (ctx) {
                crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
                ok = true;
        }
        rcu_read_unlock();
        return ok;
}

/* Generate the fastopen cookie by encrypting the source and destination
 * addresses with AES-128. IPv4 and IPv4-mapped-IPv6 addresses are
 * zero-padded to a full block; full-length IPv6 address pairs use CBC-MAC.
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
 */
static bool tcp_fastopen_cookie_gen(struct request_sock *req,
                                    struct sk_buff *syn,
                                    struct tcp_fastopen_cookie *foc)
{
        if (req->rsk_ops->family == AF_INET) {
                const struct iphdr *iph = ip_hdr(syn);

                __be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
                return __tcp_fastopen_cookie_gen(path, foc);
        }

#if IS_ENABLED(CONFIG_IPV6)
        if (req->rsk_ops->family == AF_INET6) {
                const struct ipv6hdr *ip6h = ipv6_hdr(syn);
                struct tcp_fastopen_cookie tmp;

                if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
                        struct in6_addr *buf = &tmp.addr;
                        int i;

                        for (i = 0; i < 4; i++)
                                buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
                        return __tcp_fastopen_cookie_gen(buf, foc);
                }
        }
#endif
        return false;
}
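
/* Illustration (pseudocode, derived from the code above, not compiled):
 *
 *      IPv4:  cookie = AES128(key, { saddr, daddr, 0, 0 })
 *      IPv6:  cookie = AES128(key, AES128(key, saddr) ^ daddr)
 *
 * i.e. a one-block encryption for IPv4 and a two-block CBC-MAC for IPv6.
 */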

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
                return;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                return;

        skb_dst_drop(skb);
        /* segs_in has been initialized to 1 in tcp_create_openreq_child().
         * Hence, reset segs_in to 0 before calling tcp_segs_in()
         * to avoid double counting. Also, tcp_segs_in() expects
         * skb->len to include the tcp_hdrlen. Hence, it should
         * be called before __skb_pull().
         */
        tp->segs_in = 0;
        tcp_segs_in(tp, skb);
        __skb_pull(skb, tcp_hdrlen(skb));
        sk_forced_mem_schedule(sk, skb->truesize);
        skb_set_owner_r(skb, sk);
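
        /* The SYN itself occupies one sequence number: step past it and
         * clear the SYN flag so the rest of the skb is treated as pure
         * payload.
         */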
        TCP_SKB_CB(skb)->seq++;
        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        tp->syn_data_acked = 1;

        /* u64_stats_update_begin(&tp->syncp) is not needed here,
         * as we are certainly not changing the upper 32-bit value (0).
         */
        tp->bytes_received = skb->len;

        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                tcp_fin(sk);
}

static struct sock *tcp_fastopen_create_child(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req)
{
        struct tcp_sock *tp;
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;
        bool own_req;

        req->num_retrans = 0;
        req->num_timeout = 0;
        req->sk = NULL;

        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         NULL, &own_req);
        if (!child)
                return NULL;

        spin_lock(&queue->fastopenq.lock);
        queue->fastopenq.qlen++;
        spin_unlock(&queue->fastopenq.lock);

        /* Initialize the child socket. Have to fix some values to take
         * into account the child is a Fast Open socket and is created
         * only out of the bits carried in the SYN packet.
         */
        tp = tcp_sk(child);

        tp->fastopen_rsk = req;
        tcp_rsk(req)->tfo_listener = true;

        /* RFC1323: The window in SYN & SYN/ACK segments is never
         * scaled. So correct it appropriately.
         */
        tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
        tp->max_window = tp->snd_wnd;

        /* Activate the retrans timer so that SYNACK can be retransmitted.
         * The request socket is not added to the ehash
         * because it's been added to the accept queue directly.
         */
        inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
                                  TCP_TIMEOUT_INIT, TCP_RTO_MAX);
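
        /* Two references (as the surrounding code reads): one held via
         * tp->fastopen_rsk above, one for the request sitting in the
         * listener's accept queue.
         */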
        refcount_set(&req->rsk_refcnt, 2);

        /* Now finish processing the fastopen child socket. */
        inet_csk(child)->icsk_af_ops->rebuild_header(child);
        tcp_init_congestion_control(child);
        tcp_mtup_init(child);
        tcp_init_metrics(child);
        tcp_call_bpf(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
        tcp_init_buffer_space(child);

        tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

        tcp_fastopen_add_skb(child, skb);

        tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
        tp->rcv_wup = tp->rcv_nxt;
        /* tcp_conn_request() is sending the SYNACK,
         * and queues the child into the listener's accept queue.
         */
        return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
        struct fastopen_queue *fastopenq;

        /* Make sure the listener has enabled fastopen, and we don't
         * exceed the max # of pending TFO requests allowed before trying
         * to validate the cookie, in order to avoid burning CPU cycles
         * unnecessarily.
         *
         * XXX (TFO) - The implication of checking the max_qlen before
         * processing a cookie request is that clients can't differentiate
         * between qlen overflow causing Fast Open to be disabled
         * temporarily vs a server not supporting Fast Open at all.
         */
        fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
        if (fastopenq->max_qlen == 0)
                return false;

        if (fastopenq->qlen >= fastopenq->max_qlen) {
                struct request_sock *req1;

                spin_lock(&fastopenq->lock);
                req1 = fastopenq->rskq_rst_head;
                if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
                        __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
                        spin_unlock(&fastopenq->lock);
                        return false;
                }
                fastopenq->rskq_rst_head = req1->dl_next;
                fastopenq->qlen--;
                spin_unlock(&fastopenq->lock);
                reqsk_put(req1);
        }
        return true;
}

/* Returns the child socket if we should perform Fast Open on the SYN,
 * otherwise NULL. The cookie (foc) may be updated and returned to the
 * client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc)
{
        struct tcp_fastopen_cookie valid_foc = { .len = -1 };
        bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
        struct sock *child;

        if (foc->len == 0) /* Client requests a cookie */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

        if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
              (syn_data || foc->len >= 0) &&
              tcp_fastopen_queue_check(sk))) {
                foc->len = -1;
                return NULL;
        }

        if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
                goto fastopen;

        if (foc->len >= 0 &&  /* Client presents or requests a cookie */
            tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
            foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
            foc->len == valid_foc.len &&
            !memcmp(foc->val, valid_foc.val, foc->len)) {
                /* Cookie is valid. Create a (full) child socket to accept
                 * the data in SYN before returning a SYN-ACK to ack the
                 * data. If we fail to create the socket, fall back and
                 * ack the ISN only, but include the same cookie.
                 *
                 * Note: Data-less SYN with valid cookie is allowed to send
                 * data in SYN_RECV state.
                 */
fastopen:
                child = tcp_fastopen_create_child(sk, skb, req);
                if (child) {
                        foc->len = -1;
                        NET_INC_STATS(sock_net(sk),
                                      LINUX_MIB_TCPFASTOPENPASSIVE);
                        return child;
                }
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
        } else if (foc->len > 0) /* Client presents an invalid cookie */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);

        valid_foc.exp = foc->exp;
        *foc = valid_foc;
        return NULL;
}

bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
                               struct tcp_fastopen_cookie *cookie)
{
        unsigned long last_syn_loss = 0;
        int syn_loss = 0;

        tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);

        /* Recurring FO SYN losses: back off, sending neither cookie nor
         * data in the SYN for a while.
         */
        if (syn_loss > 1 &&
            time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
                cookie->len = -1;
                return false;
        }

        /* Firewall blackhole issue check */
        if (tcp_fastopen_active_should_disable(sk)) {
                cookie->len = -1;
                return false;
        }

        if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) {
                cookie->len = -1;
                return true;
        }
        return cookie->len > 0;
}

/* This function checks if we want to defer sending SYN until the first
 * write().  We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: true if we want to defer until the application writes data,
 *               false if we want to send out the SYN immediately.
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
        struct tcp_fastopen_cookie cookie = { .len = 0 };
        struct tcp_sock *tp = tcp_sk(sk);
        u16 mss;

        if (tp->fastopen_connect && !tp->fastopen_req) {
                if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
                        inet_sk(sk)->defer_connect = 1;
                        return true;
                }

                /* Alloc fastopen_req in order for FO option to be included
                 * in SYN
                 */
                tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
                                           sk->sk_allocation);
                if (tp->fastopen_req)
                        tp->fastopen_req->cookie = cookie;
                else
                        *err = -ENOBUFS;
        }
        return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);
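
/* Userspace view of the defer-connect path (an illustrative sketch, error
 * handling omitted): with the TCP_FASTOPEN_CONNECT socket option set,
 * connect() returns immediately and the SYN, carrying data, goes out on the
 * first write():
 *
 *      int one = 1;
 *      int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *      setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
 *      connect(fd, (struct sockaddr *)&addr, sizeof(addr)); // SYN deferred
 *      write(fd, "GET / HTTP/1.0\r\n\r\n", 18);             // SYN + data
 */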

/*
 * The following code block is to deal with middle box issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 * 1. client side TFO socket receives out of order FIN
 * 2. client side TFO socket receives out of order RST
 * We disable active side TFO globally for 1 hour at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1 hour when we see a successful active
 * TFO connection with data exchanges.
 */
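
/* Worked example with the default 1 hour timeout: the 1st disable event
 * blocks active TFO for 1h, the 2nd for 2h, the 3rd for 4h, ... capped at
 * 2^6 = 64h from the 7th event on; see tcp_fastopen_active_should_disable().
 */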

/* Default to 1hr */
unsigned int sysctl_tcp_fastopen_blackhole_timeout __read_mostly = 60 * 60;
static atomic_t tfo_active_disable_times __read_mostly = ATOMIC_INIT(0);
static unsigned long tfo_active_disable_stamp __read_mostly;

/* Disable active TFO, record the current jiffies in
 * tfo_active_disable_stamp, and bump tfo_active_disable_times.
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
        atomic_inc(&tfo_active_disable_times);
        tfo_active_disable_stamp = jiffies;
        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Reset tfo_active_disable_times to 0 */
void tcp_fastopen_active_timeout_reset(void)
{
        atomic_set(&tfo_active_disable_times, 0);
}

/* Calculate the timeout for the active TFO disable period.
 * Return true if we are still inside the active TFO disable period.
 * Return false if the timeout has expired and we should use active TFO.
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
        int tfo_da_times = atomic_read(&tfo_active_disable_times);
        int multiplier;
        unsigned long timeout;

        if (!tfo_da_times)
                return false;

        /* Limit timeout to max: 2^6 * initial timeout */
        multiplier = 1 << min(tfo_da_times - 1, 6);
        timeout = multiplier * sysctl_tcp_fastopen_blackhole_timeout * HZ;
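        /* e.g. tfo_da_times == 3 with the default sysctl gives
         * timeout = 4 * 3600 * HZ (4 hours)
         */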
        if (time_before(jiffies, tfo_active_disable_stamp + timeout))
                return true;

        /* Mark the check bit so we can check for a successful active TFO
         * condition and reset tfo_active_disable_times.
         */
        tcp_sk(sk)->syn_fastopen_ch = 1;
        return false;
}

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct rb_node *p;
        struct sk_buff *skb;
        struct dst_entry *dst;

        if (!tp->syn_fastopen)
                return;

        if (!tp->data_segs_in) {
                p = rb_first(&tp->out_of_order_queue);
                if (p && !rb_next(p)) {
                        skb = rb_entry(p, struct sk_buff, rbnode);
                        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
                                tcp_fastopen_active_disable(sk);
                                return;
                        }
                }
        } else if (tp->syn_fastopen_ch &&
                   atomic_read(&tfo_active_disable_times)) {
                dst = sk_dst_get(sk);
                if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
                        tcp_fastopen_active_timeout_reset();
                dst_release(dst);
        }
}