/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
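/* Example: with a receive window [s_win = 100, e_win = 200), a segment
 * with seq = 90, end_seq = 150 overlaps the window and is accepted,
 * while seq = 200, end_seq = 250 falls entirely outside of it.  The
 * seq == s_win and seq == e_win special cases accept zero-length
 * segments (e.g. a bare ACK) sitting exactly on the window edges.
 */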

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
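
/* For reference: the tcp_tw_status codes returned below (defined in
 * include/net/tcp.h) tell the caller (tcp_v4_rcv()/tcp_v6_rcv()) what to
 * do with the segment.  TCP_TW_SUCCESS: done, drop the skb.  TCP_TW_RST:
 * send a reset.  TCP_TW_ACK: send an ACK and release the tw bucket.
 * TCP_TW_SYN: reprocess the segment as a SYN for a new connection.
 */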

/*
 * * Main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout enough to allow one (or more) of the
 *   segments sent by the peer, and our ACKs, to be lost. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that it carries a
	   newer sequence number) works at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate a main RFC requirement: if this SYN later
	   turns out to be an old duplicate (i.e. we receive an RST in
	   reply to our SYN-ACK), we must return the socket to time-wait
	   state. It is not good, but not fatal yet.
	 */

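	/* Accept the SYN and pick a fresh ISN above anything the old
	 * incarnation could have used: tw_snd_nxt plus the largest
	 * unscaled window (65535) plus a small margin (the extra 2
	 * presumably covers the sequence space consumed by SYN and FIN).
	 * Zero is skipped: tcp_tw_isn == 0 is treated as "unset" by
	 * later processing.
	 */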
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number <rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
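		/* The expression above computes 3.5 * icsk_rto (4x minus
		 * 0.5x); it is used below as a floor for the timer so that,
		 * per the comment at the top of this file, the bucket
		 * outlives a few peer retransmissions and lost ACKs.
		 */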
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_mark		= sk->sk_mark;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BHs are
		 * disabled in the following section, otherwise the timer
		 * handler could run before we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning: this function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their values could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
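	/* "window_clamp ? : ..." is the GNU ?: extension: use window_clamp
	 * if it is non-zero, else fall back to the cached route metric.
	 */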
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save lots of memory writes here. The tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);

		smc_check_reset_syn_req(oldtp, req, newtp);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		INIT_LIST_HEAD(&newtp->tsq_node);
		INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
		newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = tcp_jiffies32;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
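		/* TCP_INIT_CWND is 10 segments, per RFC 6928. */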
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		/* There's a bubble in the pipe until at least the first ACK. */
		newtp->app_limited = ~0U;

		tcp_init_xmit_timers(newsk);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		newtp->rx_opt.sack_ok = ireq->sack_ok;
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_req = NULL;
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp = 0;
		newtp->rack.advanced = 0;
		newtp->rack.reo_wnd_steps = 1;
		newtp->rack.last_delivered = 0;
		newtp->rack.reo_wnd_persist = 0;
		newtp->rack.dsack_seen = 0;

		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation here and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required, it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description
		 * is wrong, we cannot believe it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however: it fails only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes. So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK. Otherwise, we create an established connection. Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless and rare. The probability is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   on a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary, to
		 * avoid becoming vulnerable to an outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	/* record NAPI ID of child */
	sk_mark_napi_id(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);