/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

int sysctl_tcp_abort_on_overflow __read_mostly;

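/* Return true if the segment [seq, end_seq) is acceptable in the receive
 * window [s_win, e_win): either it starts at the left window edge, it
 * overlaps the window, or it is a zero-length segment sitting exactly at
 * the right edge.
 */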
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

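/* Send an ACK from TIME-WAIT, unless the out-of-window ACK rate limit
 * kicks in; when the ACK is suppressed, the tw reference is dropped here
 * and the caller sees TCP_TW_SUCCESS.
 */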
static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout enough to allow losing one (or more)
 *   segments sent by the peer and our ACKs. This time may be calculated
 *   from the RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

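	/* A dead FIN-WAIT-2 socket (see tcp_time_wait()) is represented as
	 * a timewait socket whose tw_substate is still TCP_FIN_WAIT2; it
	 * has to replay the relevant checks of tcp_rcv_state_process()
	 * itself.
	 */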
	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 * connection to be larger than the largest sequence
	 * number it used on the previous connection incarnation,
	 * and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 * to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that it has a
	   newer sequence number) works at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive an RST in reply to
	   our SYN-ACK), we must return the socket to time-wait state. It
	   is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
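		/* Per rule (1) above, hand the listener an ISN hint that is
		 * past anything the old incarnation could have sent:
		 * tw_snd_nxt plus the maximal unscaled window (65535), plus
		 * a little slack; 0 is skipped since tcp_tw_isn == 0 means
		 * "no hint, generate a fresh ISN".
		 */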
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
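
/* Callers dispatch on the return value; a sketch of the IPv4 receive
 * path's handling (see tcp_v4_rcv(), simplified and abridged here):
 *
 *	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
 *	case TCP_TW_SYN:	// retry the lookup as a fresh connection
 *	case TCP_TW_ACK:	tcp_v4_timewait_ack(sk, skb); break;
 *	case TCP_TW_RST:	// send a reset, kill the tw socket
 *	case TCP_TW_SUCCESS:	break;	// segment consumed or dropped
 *	}
 */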

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
					BUG();
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing; rto above is roughly
		 * 3.5 * icsk_rto, so we wait at least long enough to see
		 * the peer's retransmissions.
		 */
		if (timeo < rto)
			timeo = rto;

		tw->tw_timeout = TCP_TIMEWAIT_LEN;
		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		inet_twsk_schedule(tw, timeo);
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

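/* Propagate the ECN capability negotiated during the handshake from the
 * request into the child's flags.
 */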
static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

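/* Choose the congestion control for the child socket: a CC algorithm may
 * be pinned to the route (RTAX_CC_ALGO metric, e.g. set with
 * "ip route ... congctl"); otherwise keep what the listener configured,
 * or fall back to the system default.
 */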
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);
		INIT_LIST_HEAD(&newtp->tsq_node);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
		newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = tcp_jiffies32;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		/* There's a bubble in the pipe until at least the first ACK. */
		newtp->app_limited = ~0U;

		tcp_init_xmit_timers(newsk);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
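			/* No window scaling was negotiated: both scale
			 * factors stay 0 and the window can never exceed
			 * the 16-bit header field, so clamp to 64K-1.
			 */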
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_req = NULL;
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp = 0;
		newtp->rack.advanced = 0;

		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required, it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe SYN-RECV state. All the description
		 * is wrong, we cannot believe it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however — it fails only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes.  So
	   does sequence test, SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless, and rare.  The possibility is about
	   the same as us discovering intelligent life on another planet tomorrow.

	   But generally, we should (the RFC lies!) accept ACK
	   from SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before attempting to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	/* record NAPI ID of child */
	sk_mark_napi_id(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);