/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

int sysctl_tcp_abort_on_overflow __read_mostly;

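/* Accept a segment [seq, end_seq] against the window [s_win, e_win):
 * true if it starts exactly on the left edge, overlaps the window, or is
 * an empty segment sitting exactly on the right edge.
 */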
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

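/* ACK replies to out-of-window segments hitting a TIME-WAIT socket are
 * rate-limited (see tcp_oow_rate_limited()); while the limiter is active
 * we silently drop the segment instead of ACKing it.
 */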
static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one end sits in LAST-ACK or CLOSING, retransmitting its
 *   FIN (and, probably, a tail of data) while one or more of our ACKs are
 *   lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that it
 *   is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow one (or more)
 *   segments sent by the peer, and our ACKs, to be lost. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Certainly this is pure paranoia, but if we load TIME-WAIT with this
 *   semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some cleverer way to catch duplicates (e.g. based on
 *   PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket is _not_
 * stateless. Strictly speaking, that means we must spinlock it, which I do
 * not want to do. The probability of misbehaviour is ridiculously low, and
 * it seems we could use some mb() tricks to avoid misreading sequence
 * numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 */
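/* Return value tells the caller what to do with the segment:
 * TCP_TW_SUCCESS - segment fully handled (or dropped), nothing more to send;
 * TCP_TW_ACK     - caller should ACK the segment (the tw bucket is
 *                  intentionally not put here);
 * TCP_TW_RST     - caller should send a reset;
 * TCP_TW_SYN     - acceptable new SYN; tcp_tw_isn is set so the connection
 *                  can be reopened via the listener.
 */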
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;
	struct inet_timewait_death_row *tcp_death_row = &sock_net((struct sock *)tw)->ipv4.tcp_death_row;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.rcv_tsecr	-= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row->sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_reschedule(tw, tw->tw_timeout);
		else
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it if it is not an
	   old duplicate and we are in no danger of being killed by delayed
	   old duplicates. The RFC check - that it carries a newer sequence
	   number - works at rates < 40 Mbit/sec. However, if PAWS works,
	   it is reliable, and we may even relax the silly sequence-space
	   cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN later
	   turns out to be an old duplicate (i.e. we receive an RST in reply
	   to our SYN-ACK), we must return the socket to time-wait state.
	   That is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
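		/* Per the RFC 1122 rule quoted above, pick an ISN for the
		 * reopened connection that lies above anything the old
		 * incarnation could have used: old snd_nxt plus a maximal
		 * 64K window.  Zero is skipped because downstream code
		 * treats a zero tcp_tw_isn as "not set".
		 */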
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be either an old duplicate
		 * or a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	bool recycle_ok = false;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (tcp_death_row->sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tcp_remember_stamp(sk);

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
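		/* Minimum timeout for the dying FIN-WAIT-2/TIME-WAIT socket:
		 * 3.5 * RTO, i.e. (RTO << 2) - (RTO >> 1).
		 */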
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
					BUG();
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, timeo);
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

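/* Final teardown of a timewait sock: release the private copy of the MD5 key
 * (if MD5 signatures were in use) that tcp_time_wait() made above.
 */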
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
	int full_space = tcp_full_space(sk_listener);
	int mss = dst_metric_advmss(dst);
	u32 window_clamp;
	__u8 rcv_wscale;

	if (user_mss && user_mss < mss)
		mss = user_mss;

	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		dst_metric(dst, RTAX_INITRWND));
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

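/* Propagate the ECN capability negotiated on the request sock to the child. */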
static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

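/* Choose the congestion control module for a freshly accepted child socket:
 * prefer an algorithm cached in the destination metrics (RTAX_CC_ALGO), else
 * keep whatever the listener had explicitly set with setsockopt(), else fall
 * back to the system default.
 */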
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save a lot of memory writes here: the tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);
		INIT_LIST_HEAD(&newtp->tsq_node);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = treq->snt_synack.stamp_jiffies;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		/* There's a bubble in the pipe until at least the first ACK. */
		newtp->app_limited = ~0U;

		tcp_init_xmit_timers(newsk);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp.v64 = 0;
		newtp->rack.advanced = 0;

		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current implementation contains a special check for ACK
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true timestamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws this case (incorrectly! it was fixed in
		 * RFC1122) in figures 6 and 8, but the formal protocol
		 * description says NOTHING. To be more exact, it says that
		 * we should send an ACK, because this segment (at least, if
		 * it has no data) is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT describe
		 * the SYN-RECV state. All of its description is wrong; we
		 * cannot believe it and should rely only on common sense and
		 * implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8 and figure 6 of
		 * RFC793, as fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet, it
		 * will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar
		 * to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however - it fails to work only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So now A is eating this SYN|ACK and the ACK test passes.  So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless and rare.  The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But in general, we should (the RFC lies!) accept the ACK
	   of a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before attempting to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);