/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

int sysctl_tcp_abort_on_overflow __read_mostly;

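/* Accept the segment if any part of it falls inside the receive window
 * [s_win, e_win); a zero-length segment is acceptable only when it sits
 * exactly at a window edge.
 */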
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting a FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout enough to allow losing one (or more)
 *   segments sent by the peer and our ACKs. This time may be calculated
 *   from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

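	/* Parse options only if the header is long enough to carry any and
	 * we have a timestamp on record; the result feeds the PAWS check.
	 */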
	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In-window segment; it can only be a reset or a bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All such segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it if it is
	 * not an old duplicate and we are not in danger of being killed
	 * by delayed old duplicates. The RFC check (that the SYN carries
	 * a newer sequence number) works only at rates <40Mbit/sec.
	 * However, if PAWS works, it is reliable AND, even more,
	 * we may even relax the silly seq-space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement: if this SYN turns
	 * out to be an old duplicate (i.e. we receive an RST in reply to
	 * our SYN-ACK), we must return the socket to time-wait state. It
	 * is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
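		/* Pick an ISN well above anything the previous incarnation
		 * could have sent: snd_nxt plus a maximal (unscaled) window,
		 * so the sequence spaces of the two connections cannot
		 * overlap.
		 */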
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number <rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
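
/* Callers (e.g. tcp_v4_rcv()/tcp_v6_rcv()) dispatch on the result:
 * TCP_TW_SYN feeds the SYN to a matching listener, TCP_TW_ACK answers
 * with a timewait ACK, TCP_TW_RST sends a reset, and TCP_TW_SUCCESS
 * means the segment has been consumed or dropped and nothing more is
 * to be done.
 */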

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
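		/* (icsk_rto << 2) - (icsk_rto >> 1) == 3.5 * RTO: used below
		 * as a floor for the timewait timeout, long enough to see a
		 * retransmitted FIN from the peer.
		 */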
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;

			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		tw->tw_timeout = TCP_TIMEWAIT_LEN;
		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		inet_twsk_schedule(tw, timeo);
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

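/* Compute the receive window parameters that will be advertised in the
 * SYN-ACK: route metrics, a possible BPF-supplied initial rwnd and the
 * listener's buffer limits all feed into the final choice.
 */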
/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

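/* Pick the congestion control for a freshly minted child socket: a route
 * metric (RTAX_CC_ALGO) may pin an algorithm; otherwise fall back to the
 * listener's setsockopt() choice or the system default.
 */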
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here: the tp of the
 * listening socket already contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);

		smc_check_reset_syn_req(oldtp, req, newtp);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

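		/* The receive side continues from the peer's ISN, the send
		 * side from our own (snt_isn); both +1 to account for the SYN.
		 */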
		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		INIT_LIST_HEAD(&newtp->tsq_node);
		INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
		newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = tcp_jiffies32;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		/* There's a bubble in the pipe until at least the first ACK. */
		newtp->app_limited = ~0U;

		tcp_init_xmit_timers(newsk);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_req = NULL;
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp = 0;
		newtp->rack.advanced = 0;

		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation, and another one inside tcp_v4_reqsk_send_ack(). Can we
 * do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

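	/* Run PAWS against the timestamp recorded in the request; ts_off
	 * removes the per-connection timestamp offset from the echoed value.
	 */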
	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description
		 * is wrong, we cannot believe it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; it fails only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes. So
	   does sequence test, SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK. Otherwise, we create an established connection. Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless, and rare. The probability is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before the attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to an outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

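/* The child arrives here still bh-locked and referenced from its creation
 * (inet_csk_clone_lock()); both the lock and the reference are released
 * before returning.
 */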
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	/* record NAPI ID of child */
	sk_mark_napi_id(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);