/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

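/* Validate an incoming segment against the receive window: accept it when
 * [seq, end_seq] overlaps [s_win, e_win), when it starts exactly at the left
 * window edge, or when a zero-length segment sits exactly at the right edge.
 */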
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

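/* Decide whether a TIME-WAIT socket may answer an out-of-window segment
 * with an ACK, or whether that ACK must be suppressed by the out-of-window
 * rate limiter (tracked per bucket in tw_last_oow_ack_time).
 */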
static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that it
 *   is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, that means we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In-window segment: it can only be a reset or a bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check, that it carries a
	   newer sequence number, works at rates < 40 Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive an RST in reply to
	   the SYN-ACK), we must return the socket to time-wait state.
	   That is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
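		/* Pick an ISN above the old connection's send space
		 * (old snd_nxt plus the maximum unscaled window), per the
		 * RFC 1122 rule quoted above, so segments from the previous
		 * incarnation cannot be mistaken for data on the new one.
		 */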
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be either an old duplicate
		 * or a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
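		/* (icsk_rto << 2) - (icsk_rto >> 1) is 3.5 * RTO; used below
		 * as a lower bound on the TIME-WAIT/FIN-WAIT-2 timeout.
		 */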
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_mark		= sk->sk_mark;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BH are disabled
		 * in following section, otherwise timer handler could run before
		 * we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

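/* Free the per-bucket MD5 key copy (if any) when the timewait sock dies. */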
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
				  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
				  &req->rsk_rcv_wnd,
				  &req->rsk_window_clamp,
				  ireq->wscale_ok,
				  &rcv_wscale,
				  rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

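/* Carry the ECN capability negotiated during the handshake over to the
 * child socket.
 */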
static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

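/* Install the congestion control algorithm recorded in the route's CC_ALGO
 * metric on the child socket, falling back to the listener's setsockopt
 * choice or the system default when the metric is unset or the module
 * cannot be grabbed.
 */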
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

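/* If the listener requested SMC but the peer's SYN did not acknowledge it,
 * clear syn_smc on the child socket.
 */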
static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	struct tcp_sock *oldtp, *newtp;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	newtp->rcv_wup = newtp->copied_seq =
	newtp->rcv_nxt = treq->rcv_isn + 1;
	newtp->segs_in = 1;

	newtp->snd_sml = newtp->snd_una =
	newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	newtp->srtt_us = 0;
	newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_rto = TCP_TIMEOUT_INIT;
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->packets_out = 0;
	newtp->retrans_out = 0;
	newtp->sacked_out = 0;
	newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	newtp->tlp_high_seq = 0;
	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = treq->txhash;
	newtp->last_oow_ack_time = 0;
	newtp->total_retrans = req->num_retrans;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	newtp->snd_cwnd = TCP_INIT_CWND;
	newtp->snd_cwnd_cnt = 0;

	/* There's a bubble in the pipe until at least the first ACK. */
	newtp->app_limited = ~0U;

	tcp_init_xmit_timers(newsk);
	newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

	newtp->rx_opt.saw_tstamp = 0;

	newtp->rx_opt.dsack = 0;
	newtp->rx_opt.num_sacks = 0;

	newtp->urg_data = 0;

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

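	/* Timestamps, when negotiated, cost TCPOLEN_TSTAMP_ALIGNED bytes of
	 * header space on every segment of this connection.
	 */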
	if (newtp->rx_opt.tstamp_ok) {
		newtp->rx_opt.ts_recent = req->ts_recent;
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
	if (newtp->af_specific->md5_lookup(sk, newsk))
		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	newtp->fastopen_rsk = NULL;
	newtp->syn_data_acked = 0;
	newtp->rack.mstamp = 0;
	newtp->rack.advanced = 0;
	newtp->rack.reo_wnd_steps = 1;
	newtp->rack.last_delivered = 0;
	newtp->rack.reo_wnd_persist = 0;
	newtp->rack.dsack_seen = 0;

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required, it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset timer after retransmitting SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken; however, it fails only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A, eating this SYN|ACK; the ACK test passes.  So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless and rare.  The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept the ACK
	   from the SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before attempting to create the socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *		    and the incoming segment acknowledges something not yet
	 *		    sent (the segment carries an unacceptable ACK) ...
	 *		    a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
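	/* By default (tcp_abort_on_overflow == 0) just mark the request as
	 * acked and let the peer retransmit; a reset is sent only when the
	 * sysctl asks for it, by falling through to embryonic_reset below.
	 */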
	if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary, to
		 * avoid becoming vulnerable to an outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	/* record NAPI ID of child */
	sk_mark_napi_id(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);