// SPDX-License-Identifier: GPL-2.0
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

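/* Generate and install a random Fast Open key for this netns, unless a
 * key has already been published.
 */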
void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, NULL, key, NULL);
}

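/* RCU callback: zero and free an old Fast Open key context. */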
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
		container_of(head, struct tcp_fastopen_context, rcu);

	kfree_sensitive(ctx);
}

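/* Free the Fast Open key context attached to a socket, if any. */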
void tcp_fastopen_destroy_cipher(struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference_protected(
		inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
	if (ctx)
		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

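/* Detach the per-netns Fast Open key context and free it after an RCU
 * grace period.
 */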
void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

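/* Install a new primary key (and optional backup key), either on a listener
 * socket or, when sk is NULL, on the netns. The previous context, if any,
 * is freed via RCU.
 */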
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key)
{
	struct tcp_fastopen_context *ctx, *octx;
	struct fastopen_queue *q;
	int err = 0;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	ctx->key[0].key[0] = get_unaligned_le64(primary_key);
	ctx->key[0].key[1] = get_unaligned_le64(primary_key + 8);
	if (backup_key) {
		ctx->key[1].key[0] = get_unaligned_le64(backup_key);
		ctx->key[1].key[1] = get_unaligned_le64(backup_key + 8);
		ctx->num = 2;
	} else {
		ctx->num = 1;
	}

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
	if (sk) {
		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
		octx = rcu_dereference_protected(q->ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(q->ctx, ctx);
	} else {
		octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
	}
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
out:
	return err;
}

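/* Copy the currently installed keys into @key, primary key first, and
 * return the number of keys present.
 */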
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key)
{
	struct tcp_fastopen_context *ctx;
	int n_keys = 0, i;

	rcu_read_lock();
	if (icsk)
		ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
	else
		ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctx) {
		n_keys = tcp_fastopen_context_len(ctx);
		for (i = 0; i < n_keys; i++) {
			put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
			put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
		}
	}
	rcu_read_unlock();

	return n_keys;
}

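/* Compute a cookie over the source and destination addresses of the SYN
 * using the given SipHash key. Returns false for an unhandled address
 * family.
 */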
static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
					     struct sk_buff *syn,
					     const siphash_key_t *key,
					     struct tcp_fastopen_cookie *foc)
{
	BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));

	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
					  sizeof(iph->saddr) +
					  sizeof(iph->daddr),
					  key));
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);

		foc->val[0] = cpu_to_le64(siphash(&ip6h->saddr,
					  sizeof(ip6h->saddr) +
					  sizeof(ip6h->daddr),
					  key));
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#endif
	return false;
}

/* Generate the fastopen cookie by applying SipHash to both the source and
 * destination addresses.
 */
static void tcp_fastopen_cookie_gen(struct sock *sk,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (ctx)
		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
	rcu_read_unlock();
}

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting. Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen. Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}

/* returns 0 - no key match, 1 for primary, 2 for backup */
static int tcp_fastopen_cookie_gen_check(struct sock *sk,
					 struct request_sock *req,
					 struct sk_buff *syn,
					 struct tcp_fastopen_cookie *orig,
					 struct tcp_fastopen_cookie *valid_foc)
{
	struct tcp_fastopen_cookie search_foc = { .len = -1 };
	struct tcp_fastopen_cookie *foc = valid_foc;
	struct tcp_fastopen_context *ctx;
	int i, ret = 0;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (!ctx)
		goto out;
	for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
		if (tcp_fastopen_cookie_match(foc, orig)) {
			ret = i + 1;
			goto out;
		}
		foc = &search_foc;
	}
out:
	rcu_read_unlock();
	return ret;
}

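/* Create the full child socket for a validated Fast Open request and queue
 * any data carried in the SYN on its receive queue.
 */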
static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	rcu_assign_pointer(tp->fastopen_rsk, req);
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into listener accept queue.
	 */
	return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}

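/* Return true if no Fast Open cookie is required, whether by sysctl,
 * per-socket setting or route metric.
 */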
static bool tcp_fastopen_no_cookie(const struct sock *sk,
				   const struct dst_entry *dst,
				   int flag)
{
	return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
	       tcp_sk(sk)->fastopen_no_cookie ||
	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}

/* Returns true if we should perform Fast Open on the SYN. The cookie (foc)
 * may be updated and returned to the client in the SYN-ACK later, e.g. for a
 * Fast Open cookie request (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;
	int ret = 0;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (syn_data &&
	    tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len == 0) {
		/* Client requests a cookie. */
		tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
	} else if (foc->len > 0) {
		ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
						    &valid_foc);
		if (!ret) {
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		} else {
			/* Cookie is valid. Create a (full) child socket to
			 * accept the data in SYN before returning a SYN-ACK to
			 * ack the data. If we fail to create the socket, fall
			 * back and ack the ISN only but include the same
			 * cookie.
			 *
			 * Note: Data-less SYN with valid cookie is allowed to
			 * send data in SYN_RECV state.
			 */
fastopen:
			child = tcp_fastopen_create_child(sk, skb, req);
			if (child) {
				if (ret == 2) {
					valid_foc.exp = foc->exp;
					*foc = valid_foc;
					NET_INC_STATS(sock_net(sk),
						      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
				} else {
					foc->len = -1;
				}
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPFASTOPENPASSIVE);
				return child;
			}
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		}
	}
	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}

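/* Check the cached Fast Open cookie (and MSS) for an active open. Returns
 * true if data may be sent in the SYN, i.e. a cookie is cached or none is
 * required.
 */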
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	const struct dst_entry *dst;

	tcp_fastopen_cache_get(sk, mss, cookie);

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	dst = __sk_dst_get(sk);

	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
		cookie->len = -1;
		return true;
	}
	if (cookie->len > 0)
		return true;
	tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
	return false;
}

/* This function checks if we want to defer sending SYN until the first
 * write(). We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);

/*
 * The following code block is to deal with middle box issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 * 1. client side TFO socket receives out of order FIN
 * 2. client side TFO socket receives out of order RST
 * 3. client side TFO socket has timed out three times consecutively during
 *    or after handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	/* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
	WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

	/* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
	 * We want net->ipv4.tfo_active_disable_stamp to be updated first.
	 */
	smp_mb__before_atomic();
	atomic_inc(&net->ipv4.tfo_active_disable_times);

	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
	int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	unsigned long timeout;
	int multiplier;

	if (!tfo_da_times)
		return false;

	/* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
	smp_rmb();

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);

	/* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
	timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
		  multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies, timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		dst_release(dst);
	}
}

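/* Disable active TFO globally when a Fast Open connection suffers repeated
 * retransmission timeouts (see the policy comment below).
 */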
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
	u32 timeouts = inet_csk(sk)->icsk_retransmits;
	struct tcp_sock *tp = tcp_sk(sk);

	/* Broken middle-boxes may black-hole Fast Open connection during or
	 * even after the handshake. Be extremely conservative and pause
	 * Fast Open globally after hitting the third consecutive timeout or
	 * exceeding the configured timeout limit.
	 */
	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
	    (timeouts == 2 || (timeouts < 2 && expired))) {
		tcp_fastopen_active_disable(sk);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
	}
}