// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

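/* Return true if the segment [seq, end_seq] overlaps the receive window
 * [s_win, e_win), or starts exactly at its left edge, or is a zero-length
 * segment sitting exactly at its right edge.
 */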
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
        if (seq == s_win)
                return true;
        if (after(end_seq, s_win) && before(seq, e_win))
                return true;
        return seq == e_win && seq == end_seq;
}

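/* Rate-limit ACKs generated in response to out-of-window segments seen by a
 * timewait socket.  If this ACK is not rate-limited, return TCP_TW_ACK and
 * leave the tw reference alone so the caller can send the ACK and release it;
 * otherwise release the tw socket here and return TCP_TW_SUCCESS so the
 * segment is silently dropped.
 */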
static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
                                  const struct sk_buff *skb, int mib_idx)
{
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

        if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
                                  &tcptw->tw_last_oow_ack_time)) {
                /* Send ACK. Note, we do not put the bucket,
                 * it will be released by caller.
                 */
                return TCP_TW_ACK;
        }

        /* We are rate-limiting, so just release the tw sock and drop skb. */
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
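/* Return values of tcp_timewait_state_process():
 *
 *   TCP_TW_SUCCESS - the segment was fully handled (or dropped); the tw
 *                    reference has already been released here.
 *   TCP_TW_ACK     - the caller should send an ACK from the timewait socket;
 *                    the tw reference is left for the caller to release.
 *   TCP_TW_RST     - the caller should answer with a reset.
 *   TCP_TW_SYN     - an acceptable new SYN; the caller should retry the
 *                    lookup against a listening socket and process it as a
 *                    new connection attempt (tcp_tw_isn carries the ISN hint).
 *
 * A caller such as tcp_v4_rcv() dispatches on this value roughly as in the
 * following abridged sketch (not verbatim kernel code):
 *
 *      switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
 *      case TCP_TW_SYN:     ...look up a listener, handle as a new SYN...
 *      case TCP_TW_ACK:     ...send an ACK on behalf of the timewait socket...
 *      case TCP_TW_RST:     ...send a reset and kill the timewait socket...
 *      case TCP_TW_SUCCESS: ;
 *      }
 */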
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                           const struct tcphdr *th)
{
        struct tcp_options_received tmp_opt;
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        bool paws_reject = false;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
                tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        if (tmp_opt.rcv_tsecr)
                                tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
                        tmp_opt.ts_recent       = tcptw->tw_ts_recent;
                        tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        if (tw->tw_substate == TCP_FIN_WAIT2) {
                /* Just repeat all the checks of tcp_rcv_state_process() */

                /* Out of window, send ACK */
                if (paws_reject ||
                    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                   tcptw->tw_rcv_nxt,
                                   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
                        return tcp_timewait_check_oow_rate_limit(
                                tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

                if (th->rst)
                        goto kill;

                if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
                        return TCP_TW_RST;

                /* Dup ACK? */
                if (!th->ack ||
                    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
                    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
                        inet_twsk_put(tw);
                        return TCP_TW_SUCCESS;
                }

                /* New data or FIN. If new data arrive after half-duplex close,
                 * reset.
                 */
                if (!th->fin ||
                    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
                        return TCP_TW_RST;

                /* FIN arrived, enter true time-wait state. */
                tw->tw_substate   = TCP_TIME_WAIT;
                tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent_stamp = ktime_get_seconds();
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                }

                inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                return TCP_TW_ACK;
        }

        /*
         * Now real TIME-WAIT state.
         *
         * RFC 1122:
         * "When a connection is [...] on TIME-WAIT state [...]
         * [a TCP] MAY accept a new SYN from the remote TCP to
         * reopen the connection directly, if it:
         *
         * (1)  assigns its initial sequence number for the new
         * connection to be larger than the largest sequence
         * number it used on the previous connection incarnation,
         * and
         *
         * (2)  returns to TIME-WAIT state if the SYN turns out
         * to be an old duplicate".
         */

        if (!paws_reject &&
            (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
             (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
                /* In window segment, it may be only reset or bare ack. */

                if (th->rst) {
                        /* This is TIME_WAIT assassination, in two flavors.
                         * Oh well... nobody has a sufficient solution to this
                         * protocol bug yet.
                         */
                        if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
                                inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
                        }
                } else {
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                }

                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                        tcptw->tw_ts_recent_stamp = ktime_get_seconds();
                }

                inet_twsk_put(tw);
                return TCP_TW_SUCCESS;
        }

        /* Out of window segment.

           All such segments are ACKed immediately.

           The only exception is a new SYN. We accept it, if it is
           not an old duplicate and we are not in danger of being killed
           by delayed old duplicates. The RFC check - that it carries a
           newer sequence number - works at rates <40Mbit/sec.
           However, if PAWS works, it is reliable AND, even more,
           we may relax the silly seq space cutoff.

           RED-PEN: we violate the main RFC requirement: if this SYN later
           turns out to be an old duplicate (i.e. we receive an RST in reply
           to the SYN-ACK), we must return the socket to time-wait state.
           It is not good, but not fatal yet.
         */

        if (th->syn && !th->rst && !th->ack && !paws_reject &&
            (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
             (tmp_opt.saw_tstamp &&
              (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
                u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
                if (isn == 0)
                        isn++;
                TCP_SKB_CB(skb)->tcp_tw_isn = isn;
                return TCP_TW_SYN;
        }

        if (paws_reject)
                __NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

        if (!th->rst) {
                /* In this case we must reset the TIMEWAIT timer.
                 *
                 * If it is an ACKless SYN it may be both an old duplicate
                 * and a new good SYN with a random sequence number <rcv_nxt.
                 * Do not reschedule in the latter case.
                 */
                if (paws_reject || th->ack)
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

                return tcp_timewait_check_oow_rate_limit(
                        tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
        }
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
        struct inet_timewait_sock *tw;
        struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

        tw = inet_twsk_alloc(sk, tcp_death_row, state);

        if (tw) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
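                /* rto = 3.5 * RTO (4*RTO - RTO/2); used below as the minimum
                 * time we keep the tw socket around.
                 */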
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
                struct inet_sock *inet = inet_sk(sk);

                tw->tw_transparent      = inet->transparent;
                tw->tw_mark             = sk->sk_mark;
                tw->tw_priority         = sk->sk_priority;
                tw->tw_rcv_wscale       = tp->rx_opt.rcv_wscale;
                tcptw->tw_rcv_nxt       = tp->rcv_nxt;
                tcptw->tw_snd_nxt       = tp->snd_nxt;
                tcptw->tw_rcv_wnd       = tcp_receive_window(tp);
                tcptw->tw_ts_recent     = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
                tcptw->tw_ts_offset     = tp->tsoffset;
                tcptw->tw_last_oow_ack_time = 0;
                tcptw->tw_tx_delay      = tp->tcp_tx_delay;
#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);

                        tw->tw_v6_daddr = sk->sk_v6_daddr;
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
                        tw->tw_txhash = sk->sk_txhash;
                        tw->tw_ipv6only = sk->sk_ipv6only;
                }
#endif

#ifdef CONFIG_TCP_MD5SIG
                /*
                 * The timewait bucket does not have the key DB from the
                 * sock structure. We just make a quick copy of the
                 * md5 key being used (if indeed we are using one)
                 * so the timewait ack generating code has the key.
                 */
                do {
                        tcptw->tw_md5_key = NULL;
                        if (static_branch_unlikely(&tcp_md5_needed)) {
                                struct tcp_md5sig_key *key;

                                key = tp->af_specific->md5_lookup(sk, sk);
                                if (key) {
                                        tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
                                        BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
                                }
                        }
                } while (0);
#endif

                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;

                if (state == TCP_TIME_WAIT)
                        timeo = TCP_TIMEWAIT_LEN;

                /* tw_timer is pinned, so we need to make sure BH are disabled
                 * in following section, otherwise timer handler could run before
                 * we complete the initialization.
                 */
                local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates.
                 * Note that access to tw after this point is illegal.
                 */
                inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
                local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
        }

        tcp_update_metrics(sk);
        tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

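/* Final destructor for a timewait socket: drop the MD5 key that was copied
 * in tcp_time_wait(), deferring the actual free to an RCU grace period.
 */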
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
        if (static_branch_unlikely(&tcp_md5_needed)) {
                struct tcp_timewait_sock *twsk = tcp_twsk(sk);

                if (twsk->tw_md5_key)
                        kfree_rcu(twsk->tw_md5_key, rcu);
        }
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
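/* Compute the initial receive window, window clamp and receive window scale
 * that will be advertised in the SYN-ACK for this request, before a full
 * socket exists.
 */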
void tcp_openreq_init_rwin(struct request_sock *req,
                           const struct sock *sk_listener,
                           const struct dst_entry *dst)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        const struct tcp_sock *tp = tcp_sk(sk_listener);
        int full_space = tcp_full_space(sk_listener);
        u32 window_clamp;
        __u8 rcv_wscale;
        u32 rcv_wnd;
        int mss;

        mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
        window_clamp = READ_ONCE(tp->window_clamp);
        /* Set this up on the first call only */
        req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

        /* limit the window selection if the user enforces a smaller rx buffer */
        if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
            (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
                req->rsk_window_clamp = full_space;

        rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
        if (rcv_wnd == 0)
                rcv_wnd = dst_metric(dst, RTAX_INITRWND);
        else if (full_space < rcv_wnd * mss)
                full_space = rcv_wnd * mss;

        /* tcp_full_space because it is guaranteed to be the first packet */
        tcp_select_initial_window(sk_listener, full_space,
                mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
                &req->rsk_rcv_wnd,
                &req->rsk_window_clamp,
                ireq->wscale_ok,
                &rcv_wscale,
                rcv_wnd);
        ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

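/* Carry the ECN negotiation result from the request over to the child. */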
static void tcp_ecn_openreq_child(struct tcp_sock *tp,
                                  const struct request_sock *req)
{
        tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

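/* Choose the congestion control algorithm for a freshly created child socket:
 * prefer the one pinned on the route (RTAX_CC_ALGO metric) when its module
 * can be taken; otherwise keep the algorithm inherited from the listener (if
 * it was selected with setsockopt and its module is still available) or fall
 * back to the system default.
 */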
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
        bool ca_got_dst = false;

        if (ca_key != TCP_CA_UNSPEC) {
                const struct tcp_congestion_ops *ca;

                rcu_read_lock();
                ca = tcp_ca_find_key(ca_key);
                if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
                        icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
                        icsk->icsk_ca_ops = ca;
                        ca_got_dst = true;
                }
                rcu_read_unlock();
        }

        /* If no valid choice made yet, assign current system default ca. */
        if (!ca_got_dst &&
            (!icsk->icsk_ca_setsockopt ||
             !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
                tcp_assign_congestion_control(sk);

        tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

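/* If the listener negotiated SMC but this particular request did not,
 * clear syn_smc on the child so that it falls back to plain TCP.
 */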
static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
                                    struct request_sock *req,
                                    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
        struct inet_request_sock *ireq;

        if (static_branch_unlikely(&tcp_have_smc)) {
                ireq = inet_rsk(req);
                if (oldtp->syn_smc && !ireq->smc_ok)
                        newtp->syn_smc = 0;
        }
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
                                      struct sk_buff *skb)
{
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct tcp_request_sock *treq = tcp_rsk(req);
        struct inet_connection_sock *newicsk;
        struct tcp_sock *oldtp, *newtp;
        u32 seq;

        if (!newsk)
                return NULL;

        newicsk = inet_csk(newsk);
        newtp = tcp_sk(newsk);
        oldtp = tcp_sk(sk);

        smc_check_reset_syn_req(oldtp, req, newtp);

        /* Now setup tcp_sock */
        newtp->pred_flags = 0;

        seq = treq->rcv_isn + 1;
        newtp->rcv_wup = seq;
        WRITE_ONCE(newtp->copied_seq, seq);
        WRITE_ONCE(newtp->rcv_nxt, seq);
        newtp->segs_in = 1;

        seq = treq->snt_isn + 1;
        newtp->snd_sml = newtp->snd_una = seq;
        WRITE_ONCE(newtp->snd_nxt, seq);
        newtp->snd_up = seq;

        INIT_LIST_HEAD(&newtp->tsq_node);
        INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

        tcp_init_wl(newtp, treq->rcv_isn);

        minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
        newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

        newtp->lsndtime = tcp_jiffies32;
        newsk->sk_txhash = treq->txhash;
        newtp->total_retrans = req->num_retrans;

        tcp_init_xmit_timers(newsk);
        WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

        if (sock_flag(newsk, SOCK_KEEPOPEN))
                inet_csk_reset_keepalive_timer(newsk,
                                               keepalive_time_when(newtp));

        newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
        newtp->rx_opt.sack_ok = ireq->sack_ok;
        newtp->window_clamp = req->rsk_window_clamp;
        newtp->rcv_ssthresh = req->rsk_rcv_wnd;
        newtp->rcv_wnd = req->rsk_rcv_wnd;
        newtp->rx_opt.wscale_ok = ireq->wscale_ok;
        if (newtp->rx_opt.wscale_ok) {
                newtp->rx_opt.snd_wscale = ireq->snd_wscale;
                newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
        } else {
                newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
                newtp->window_clamp = min(newtp->window_clamp, 65535U);
        }
        newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
        newtp->max_window = newtp->snd_wnd;

        if (newtp->rx_opt.tstamp_ok) {
                newtp->rx_opt.ts_recent = req->ts_recent;
                newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
                newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
        } else {
                newtp->rx_opt.ts_recent_stamp = 0;
                newtp->tcp_header_len = sizeof(struct tcphdr);
        }
        if (req->num_timeout) {
                newtp->undo_marker = treq->snt_isn;
                newtp->retrans_stamp = div_u64(treq->snt_synack,
                                               USEC_PER_SEC / TCP_TS_HZ);
        }
        newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
        newtp->md5sig_info = NULL;      /*XXX*/
        if (newtp->af_specific->md5_lookup(sk, newsk))
                newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
        if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
                newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
        newtp->rx_opt.mss_clamp = req->mss;
        tcp_ecn_openreq_child(newtp, req);
        newtp->fastopen_req = NULL;
        RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

        tcp_bpf_clone(sk, newsk);

        __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

        return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

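/* Return value: the socket on which the caller should continue processing
 * the segment - a newly created child socket after a valid handshake ACK, or
 * sk itself (e.g. when the listener must answer an unacceptable ACK with a
 * reset, or for Fast Open where sk already is the child) - or NULL when the
 * segment has been dropped or fully handled here.
 */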
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
                           bool fastopen, bool *req_stolen)
{
        struct tcp_options_received tmp_opt;
        struct sock *child;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        bool paws_reject = false;
        bool own_req;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr)>>2)) {
                tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent = req->ts_recent;
                        if (tmp_opt.rcv_tsecr)
                                tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
                        /* We do not store true stamp, but it is not required,
                         * it can be estimated (approximately)
                         * from another data.
                         */
                        tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        /* Check for pure retransmitted SYN. */
        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
            flg == TCP_FLAG_SYN &&
            !paws_reject) {
                /*
                 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
                 * this case on figure 6 and figure 8, but the formal
                 * protocol description says NOTHING.
                 * To be more exact, it says that we should send an ACK,
                 * because this segment (at least, if it has no data)
                 * is out of window.
                 *
                 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
                 * describe the SYN-RECV state. All of that description
                 * is wrong, we cannot believe it and should
                 * rely only on common sense and implementation
                 * experience.
                 *
                 * Enforce "SYN-ACK" according to figure 8, figure 6
                 * of RFC793, fixed by RFC1122.
                 *
                 * Note that even if there is new data in the SYN packet
                 * it will be thrown away too.
                 *
                 * Reset the timer after retransmitting the SYNACK, similar
                 * to the idea of fast retransmit in recovery.
                 */
                if (!tcp_oow_rate_limited(sock_net(sk), skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time) &&

                    !inet_rtx_syn_ack(sk, req)) {
                        unsigned long expires = jiffies;

                        expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
                                       TCP_RTO_MAX);
                        if (!fastopen)
                                mod_timer_pending(&req->rsk_timer, expires);
                        else
                                req->rsk_timer.expires = expires;
                }
                return NULL;
        }

        /* Further reproduces section "SEGMENT ARRIVES"
           for state SYN-RECEIVED of RFC793.
           It is broken, however; it fails only
           when SYNs are crossed.

           You would think that SYN crossing is impossible here, since
           we should have a SYN_SENT socket (from connect()) on our end,
           but this is not true if the crossed SYNs were sent to both
           ends by a malicious third party. We must defend against this,
           and to do that we first verify the ACK (as per RFC793, page
           36) and reset if it is invalid. Is this a true full defense?
           To convince ourselves, let us consider a way in which the ACK
           test can still pass in this 'malicious crossed SYNs' case.
           A malicious sender sends identical SYNs (and thus identical
           sequence numbers) to both A and B:

           A: gets SYN, seq=7
           B: gets SYN, seq=7

           By our good fortune, both A and B select the same initial
           send sequence number of seven :-)

           A: sends SYN|ACK, seq=7, ack_seq=8
           B: sends SYN|ACK, seq=7, ack_seq=8

           So we are now A eating this SYN|ACK, and the ACK test passes. So
           does the sequence test, the SYN is truncated, and thus we consider
           it a bare ACK.

           If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
           bare ACK. Otherwise, we create an established connection. Both
           ends (listening sockets) accept the new incoming connection and try
           to talk to each other. 8-)

           Note: This case is both harmless and rare. The probability is about
           the same as us discovering intelligent life on another planet
           tomorrow.

           But generally, we should (the RFC lies!) accept an ACK
           on a SYNACK both here and in tcp_rcv_state_process().
           tcp_rcv_state_process() does not, hence we do not either.

           Note that the case is absolutely generic:
           we cannot optimize anything here without
           violating the protocol. All the checks must be made
           before the attempt to create a socket.
         */

        /* RFC793 page 36: "If the connection is in any non-synchronized state ...
         * and the incoming segment acknowledges something not yet
         * sent (the segment carries an unacceptable ACK) ...
         * a reset is sent."
         *
         * Invalid ACK: reset will be sent by listening socket.
         * Note that the ACK validity check for a Fast Open socket is done
         * elsewhere and is checked directly against the child socket rather
         * than req because user data may have been sent out.
         */
        if ((flg & TCP_FLAG_ACK) && !fastopen &&
            (TCP_SKB_CB(skb)->ack_seq !=
             tcp_rsk(req)->snt_isn + 1))
                return sk;

        /* Also, it would not be such a bad idea to check rcv_tsecr, which
         * is essentially an ACK extension; too-early or too-late values
         * should cause a reset in unsynchronized states.
         */

        /* RFC793: "first check sequence number". */

        if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                          tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
                /* Out of window: send ACK and drop. */
                if (!(flg & TCP_FLAG_RST) &&
                    !tcp_oow_rate_limited(sock_net(sk), skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time))
                        req->rsk_ops->send_ack(sk, skb, req);
                if (paws_reject)
                        __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                return NULL;
        }

        /* In sequence, PAWS is OK. */

        if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
                req->ts_recent = tmp_opt.rcv_tsval;

        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
                /* Truncate SYN, it is out of window starting
                   at tcp_rsk(req)->rcv_isn + 1. */
                flg &= ~TCP_FLAG_SYN;
        }

        /* RFC793: "second check the RST bit" and
         *         "fourth, check the SYN bit"
         */
        if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
                __TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
                goto embryonic_reset;
        }

        /* ACK sequence verified above, just make sure ACK is
         * set.  If ACK not set, just silently drop the packet.
         *
         * XXX (TFO) - if we ever allow "data after SYN", the
         * following check needs to be removed.
         */
        if (!(flg & TCP_FLAG_ACK))
                return NULL;

        /* For Fast Open no more processing is needed (sk is the
         * child socket).
         */
        if (fastopen)
                return sk;

        /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
        if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
                inet_rsk(req)->acked = 1;
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
                return NULL;
        }

        /* OK, ACK is valid, create big socket and
         * feed this segment to it. It will repeat all
         * the tests. THIS SEGMENT MUST MOVE SOCKET TO
         * ESTABLISHED STATE. If it will be dropped after
         * socket is created, wait for troubles.
         */
        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         req, &own_req);
        if (!child)
                goto listen_overflow;

        sock_rps_save_rxhash(child, skb);
        tcp_synack_rtt_meas(child, req);
        *req_stolen = !own_req;
        return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
        if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
                inet_rsk(req)->acked = 1;
                return NULL;
        }

embryonic_reset:
        if (!(flg & TCP_FLAG_RST)) {
                /* Received a bad SYN pkt - for TFO we try not to reset
                 * the local connection unless it's really necessary to
                 * avoid becoming vulnerable to an outside attack aiming at
                 * resetting legit local connections.
                 */
                req->rsk_ops->send_reset(sk, skb);
        } else if (fastopen) { /* received a valid RST pkt */
                reqsk_fastopen_remove(sk, req, true);
                tcp_reset(sk);
        }
        if (!fastopen) {
                inet_csk_reqsk_queue_drop(sk, req);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
        }
        return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
                      struct sk_buff *skb)
{
        int ret = 0;
        int state = child->sk_state;

        /* record NAPI ID of child */
        sk_mark_napi_id(child, skb);

        tcp_segs_in(tcp_sk(child), skb);
        if (!sock_owned_by_user(child)) {
                ret = tcp_rcv_state_process(child, skb);
                /* Wakeup parent, send SIGIO */
                if (state == TCP_SYN_RECV && child->sk_state != state)
                        parent->sk_data_ready(parent);
        } else {
                /* Alas, it is possible again, because we do the lookup
                 * in the main socket hash table and the lock on the listening
                 * socket does not protect us any more.
                 */
                __sk_add_backlog(child, skb);
        }

        bh_unlock_sock(child);
        sock_put(child);
        return ret;
}
EXPORT_SYMBOL(tcp_child_process);